abess | abess-master/python/include/Eigen/src/SparseCore/SparseBlock.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H
namespace Eigen {
// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
protected:
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
typedef SparseMatrixBase<BlockType> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
Index nonZeros() const
{
typedef internal::evaluator<XprType> EvaluatorType;
EvaluatorType matEval(m_matrix);
Index nnz = 0;
Index end = m_outerStart + m_outerSize.value();
for(Index j=m_outerStart; j<end; ++j)
for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
++nnz;
return nnz;
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
inline const XprType& nestedExpression() const { return m_matrix; }
inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
typename internal::ref_selector<XprType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
protected:
// Disable assignment with clear error message.
// Note that simply removing operator= yields compilation errors with ICC+MSVC
template<typename T>
BlockImpl& operator=(const T&)
{
EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
return *this;
}
};
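// A minimal usage sketch (an assumption-laden illustration, not part of this
// header): reading through an inner-panel block of a sparse matrix. Assumes
// Eigen is reachable as <Eigen/SparseCore>; which BlockImpl specialization is
// selected depends on the nested expression type.
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(4, 4);                  // column-major by default
  A.insert(0, 0) = 1.0;
  A.insert(2, 1) = 2.0;
  A.insert(1, 3) = 3.0;
  A.makeCompressed();
  // nonZeros() of a block iterates the covered inner vectors, as implemented above.
  std::cout << A.middleCols(1, 2).nonZeros() << "\n";   // 1
  std::cout << A.block(0, 0, 4, 2).coeff(2, 1) << "\n"; // 2
}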
/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/
namespace internal {
template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
: public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
typedef typename Base::IndexVector IndexVector;
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
template<typename OtherDerived>
inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
_NestedMatrixType& matrix = m_matrix;
// This assignment is slow if this vector set is not empty
// and/or it is not at the end of the nonzeros of the underlying matrix.
// 1 - eval to a temporary to avoid transposition and/or aliasing issues
Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
// 2 - let's check whether there is enough allocated memory
Index nnz = tmp.nonZeros();
Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
Index block_size = end - start; // available room in the current block
Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
Index free_size = m_matrix.isCompressed()
? Index(matrix.data().allocatedSize()) + block_size
: block_size;
Index tmp_start = tmp.outerIndexPtr()[0];
bool update_trailing_pointers = false;
if(nnz>free_size)
{
// realloc manually to reduce copies
typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
matrix.data().swap(newdata);
update_trailing_pointers = true;
}
else
{
if(m_matrix.isCompressed())
{
// no need to realloc, simply copy the tail at its respective position and insert tmp
matrix.data().resize(start + nnz + tail_size);
internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
update_trailing_pointers = true;
}
internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
}
// update outer index pointers and innerNonZeros
if(IsVectorAtCompileTime)
{
if(!m_matrix.isCompressed())
matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
}
else
{
StorageIndex p = StorageIndex(start);
for(Index k=0; k<m_outerSize.value(); ++k)
{
StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
if(!m_matrix.isCompressed())
matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
matrix.outerIndexPtr()[m_outerStart+k] = p;
p += nnz_k;
}
}
if(update_trailing_pointers)
{
StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
{
matrix.outerIndexPtr()[k] += offset;
}
}
return derived();
}
inline BlockType& operator=(const BlockType& other)
{
return operator=<BlockType>(other);
}
inline const Scalar* valuePtr() const
{ return m_matrix.valuePtr(); }
inline Scalar* valuePtr()
{ return m_matrix.valuePtr(); }
inline const StorageIndex* innerIndexPtr() const
{ return m_matrix.innerIndexPtr(); }
inline StorageIndex* innerIndexPtr()
{ return m_matrix.innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline StorageIndex* outerIndexPtr()
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline const StorageIndex* innerNonZeroPtr() const
{ return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
inline StorageIndex* innerNonZeroPtr()
{ return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
const Scalar& lastCoeff() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
eigen_assert(Base::nonZeros()>0);
if(m_matrix.isCompressed())
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
else
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
inline SparseMatrixType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};
} // namespace internal
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _StorageIndex StorageIndex;
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
{}
inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Base(xpr, startRow, startCol, blockRows, blockCols)
{}
using Base::operator=;
};
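// A hedged sketch of the writable path above, assuming <Eigen/SparseCore>:
// assigning a sparse expression into an inner panel of a plain SparseMatrix
// goes through sparse_matrix_block_impl::operator=, which splices the new
// values and indices into the compressed storage as described above.
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(3, 3), B(3, 2);
  A.insert(0, 0) = 1.0;
  A.insert(2, 2) = 2.0;
  A.makeCompressed();
  B.insert(1, 0) = 5.0;
  B.insert(0, 1) = 6.0;
  B.makeCompressed();
  A.middleCols(0, 2) = B;                                     // rewrite columns 0 and 1
  std::cout << A.coeff(1, 0) << " " << A.nonZeros() << "\n";  // 5 3
}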
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _StorageIndex StorageIndex;
typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
{}
inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Base(xpr, startRow, startCol, blockRows, blockCols)
{}
using Base::operator=;
private:
template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};
//----------
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major).
*/
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }
/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
  * starting at \a outerStart, if \c *this is col-major (resp. row-major).
  */
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}
/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
  * starting at \a outerStart, if \c *this is col-major (resp. row-major). Read-only.
  */
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}
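// A short sketch of the accessors above, assuming <Eigen/SparseCore>: for a
// column-major matrix, innerVector(j) is column j and innerVectors(j, n) is
// the n-column panel starting at column j.
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 1) = 4.0;
  A.insert(2, 1) = 7.0;
  A.makeCompressed();
  std::cout << A.innerVector(1).nonZeros() << "\n";     // 2
  std::cout << A.innerVectors(1, 2).nonZeros() << "\n"; // 2
  std::cout << A.innerVector(1).coeff(2) << "\n";       // 7
}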
/** Generic implementation of sparse Block expression.
  * Read-only.
  */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
typedef SparseMatrixBase<BlockType> Base;
using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
/** Column or Row constructor
*/
inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
*/
inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
{}
inline Index rows() const { return m_blockRows.value(); }
inline Index cols() const { return m_blockCols.value(); }
inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
}
inline Scalar& coeffRef(Index index)
{
return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
inline const XprType& nestedExpression() const { return m_matrix; }
inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return m_startRow.value(); }
Index startCol() const { return m_startCol.value(); }
Index blockRows() const { return m_blockRows.value(); }
Index blockCols() const { return m_blockCols.value(); }
protected:
// friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
Index nonZeros() const { return Dynamic; }
typename internal::ref_selector<XprType>::non_const_type m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
protected:
// Disable assignment with clear error message.
// Note that simply removing operator= yields compilation errors with ICC+MSVC
template<typename T>
BlockImpl& operator=(const T&)
{
EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
return *this;
}
};
namespace internal {
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
: public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
class InnerVectorInnerIterator;
class OuterVectorInnerIterator;
public:
typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename XprType::Scalar Scalar;
enum {
IsRowMajor = XprType::IsRowMajor,
OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
| // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
// revert to || as soon as not needed anymore.
(BlockRows==1 && !ArgType::IsRowMajor),
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
explicit unary_evaluator(const XprType& op)
: m_argImpl(op.nestedExpression()), m_block(op)
{}
inline Index nonZerosEstimate() const {
Index nnz = m_block.nonZeros();
if(nnz<0)
return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
return nnz;
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
evaluator<ArgType> m_argImpl;
const XprType &m_block;
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
: public EvalIterator
{
enum { IsRowMajor = unary_evaluator::IsRowMajor };
const XprType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
m_block(aEval.m_block),
m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
{
while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
EvalIterator::operator++();
}
inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
enum { IsRowMajor = unary_evaluator::IsRowMajor };
const unary_evaluator& m_eval;
Index m_outerPos;
const Index m_innerIndex;
Index m_end;
EvalIterator m_it;
public:
EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: m_eval(aEval),
m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
m_it(m_eval.m_argImpl, m_outerPos)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
while(m_it && m_it.index() < m_innerIndex) ++m_it;
if((!m_it) || (m_it.index()!=m_innerIndex))
++(*this);
}
inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
inline Index outer() const { return 0; }
inline Index row() const { return IsRowMajor ? 0 : index(); }
inline Index col() const { return IsRowMajor ? index() : 0; }
inline Scalar value() const { return m_it.value(); }
inline Scalar& valueRef() { return m_it.valueRef(); }
inline OuterVectorInnerIterator& operator++()
{
// search next non-zero entry
while(++m_outerPos<m_end)
{
// Restart iterator at the next inner-vector:
m_it.~EvalIterator();
::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
// search for the key m_innerIndex in the current outer-vector
while(m_it && m_it.index() < m_innerIndex) ++m_it;
if(m_it && m_it.index()==m_innerIndex) break;
}
return *this;
}
inline operator bool() const { return m_outerPos < m_end; }
};
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
: evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
: evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
abess | abess-master/python/include/Eigen/src/SparseCore/SparseColEtree.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of sp_coletree.c file in SuperLU
* -- SuperLU routine (version 3.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 1, 2008
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSE_COLETREE_H
#define SPARSE_COLETREE_H
namespace Eigen {
namespace internal {
/** Find the root of the tree/set containing the vertex \a i, using path halving. */
template<typename Index, typename IndexVector>
Index etree_find (Index i, IndexVector& pp)
{
Index p = pp(i); // Parent
Index gp = pp(p); // Grand parent
while (gp != p)
{
pp(i) = gp; // Parent pointer on find path is changed to former grand parent
i = gp;
p = pp(i);
gp = pp(p);
}
return p;
}
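// A hedged sketch of path halving in action; it calls the internal:: helper
// above directly, which is an implementation detail rather than public API,
// and assumes this header is reachable (e.g. through <Eigen/SparseQR>).
#include <Eigen/SparseQR>
#include <iostream>
int main()
{
  Eigen::VectorXi pp(4);
  pp << 1, 2, 3, 3;                            // chain 0 -> 1 -> 2 -> 3 (3 is a root)
  int root = Eigen::internal::etree_find(0, pp);
  std::cout << root << "\n";                   // 3: the root of the set containing 0
  std::cout << pp(0) << "\n";                  // 2: 0 now points at its former grandparent
}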
/** Compute the column elimination tree of a sparse matrix
* \param mat The matrix in column-major format.
* \param parent The elimination tree
* \param firstRowElt The column index of the first element in each row
 * \param perm The permutation to apply to the columns of \b mat
*/
template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
typedef typename MatrixType::StorageIndex StorageIndex;
StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
StorageIndex m = convert_index<StorageIndex>(mat.rows());
StorageIndex diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
root.setZero();
IndexVector pp(nc); // disjoint sets
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
// Compute the first nonzero column index in each row
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag;
for (StorageIndex col = 0; col < nc; col++)
{
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
{
Index row = it.row();
firstRowElt(row) = (std::min)(firstRowElt(row), col);
}
}
/* Compute etree by Liu's algorithm for symmetric matrices,
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
StorageIndex rset, cset, rroot;
for (StorageIndex col = 0; col < nc; col++)
{
found_diag = col>=m;
pp(col) = col;
cset = col;
root(cset) = col;
parent(col) = nc;
/* The diagonal element is treated here even if it does not exist in the matrix
* hence the loop is executed once more */
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed
Index i = col;
if(it) i = it.index();
if (i == col) found_diag = true;
StorageIndex row = firstRowElt(i);
if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row
rroot = root(rset);
if (rroot != col)
{
parent(rroot) = col;
pp(cset) = rset;
cset = rset;
root(cset) = col;
}
}
}
return 0;
}
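// A hedged usage sketch for coletree (again via the internal:: namespace, so
// an implementation detail normally invoked by SparseLU/SparseQR; assumed
// reachable through <Eigen/SparseQR>).
#include <Eigen/SparseQR>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(2, 2);         // column-major, as required
  A.insert(0, 0) = 1.0;
  A.insert(1, 0) = 2.0;
  A.insert(1, 1) = 3.0;
  A.makeCompressed();
  Eigen::VectorXi parent, firstRowElt;
  Eigen::internal::coletree(A, parent, firstRowElt);
  // parent(j) == A.cols() marks a root of the elimination forest.
  std::cout << parent.transpose() << "\n";     // 1 2: column 0 is eliminated below column 1
}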
/**
* Depth-first search from vertex n. No recursion.
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/
template <typename IndexVector>
void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{
typedef typename IndexVector::Scalar StorageIndex;
StorageIndex current = n, first, next;
while (postnum != n)
{
// Get the first kid of the current node
first = first_kid(current);
// No kid for the current node
if (first == -1)
{
// Numbering this node because it has no kid
post(current) = postnum++;
// looking for the next kid
next = next_kid(current);
while (next == -1)
{
// No more kids : back to the parent node
current = parent(current);
// numbering the parent node
post(current) = postnum++;
// Get the next kid
next = next_kid(current);
}
// stopping criterion
if (postnum == n+1) return;
// Updating current node
current = next;
}
else
{
current = first;
}
}
}
/**
* \brief Post order a tree
* \param n the number of nodes
* \param parent Input tree
* \param post postordered tree
*/
template <typename IndexVector>
void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{
typedef typename IndexVector::Scalar StorageIndex;
IndexVector first_kid, next_kid; // Linked list of children
StorageIndex postnum;
// Allocate storage for working arrays and results
first_kid.resize(n+1);
next_kid.setZero(n+1);
post.setZero(n+1);
// Set up structure describing children
first_kid.setConstant(-1);
for (StorageIndex v = n-1; v >= 0; v--)
{
StorageIndex dad = parent(v);
next_kid(v) = first_kid(dad);
first_kid(dad) = v;
}
// Depth-first search from dummy root vertex #n
postnum = 0;
internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
}
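// A hedged sketch for treePostorder: children receive postorder numbers
// before their parent, and vertex n acts as the dummy root (parent(v) == n).
// Assumes <Eigen/SparseQR> pulls in this header.
#include <Eigen/SparseQR>
#include <iostream>
int main()
{
  const int n = 3;
  Eigen::VectorXi parent(n), post;
  parent << 3, 0, 0;                           // 1 and 2 hang below 0; 0 below the dummy root
  Eigen::internal::treePostorder(n, parent, post);
  std::cout << post.transpose() << "\n";       // 2 0 1 3
}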
} // end namespace internal
} // end namespace Eigen
#endif // SPARSE_COLETREE_H
abess | abess-master/python/include/Eigen/src/SparseCore/SparseCompressedBase.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
#define EIGEN_SPARSE_COMPRESSED_BASE_H
namespace Eigen {
template<typename Derived> class SparseCompressedBase;
namespace internal {
template<typename Derived>
struct traits<SparseCompressedBase<Derived> > : traits<Derived>
{};
} // end namespace internal
/** \ingroup SparseCore_Module
* \class SparseCompressedBase
* \brief Common base class for sparse [compressed]-{row|column}-storage format.
*
* This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:
* - SparseMatrix
* - Ref<SparseMatrixType,Options>
* - Map<SparseMatrixType>
*
*/
template<typename Derived>
class SparseCompressedBase
: public SparseMatrixBase<Derived>
{
public:
typedef SparseMatrixBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)
using Base::operator=;
using Base::IsRowMajor;
class InnerIterator;
class ReverseInnerIterator;
protected:
typedef typename Base::IndexVector IndexVector;
Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
public:
/** \returns the number of non zero coefficients */
inline Index nonZeros() const
{
if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0)
return derived().nonZeros();
else if(isCompressed())
return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
else if(derived().outerSize()==0)
return 0;
else
return innerNonZeros().sum();
}
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline const Scalar* valuePtr() const { return derived().valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline Scalar* valuePtr() { return derived().valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 for SparseVector
* \sa valuePtr(), innerIndexPtr() */
inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 for SparseVector
* \sa valuePtr(), innerIndexPtr() */
inline StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }
/** \returns whether \c *this is in compressed form. */
inline bool isCompressed() const { return innerNonZeroPtr()==0; }
/** \returns a read-only view of the stored coefficients as a 1D array expression.
*
* \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
*
* \sa valuePtr(), isCompressed() */
const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
/** \returns a read-write view of the stored coefficients as a 1D array expression
*
* \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
*
* Here is an example:
* \include SparseMatrix_coeffs.cpp
* and the output is:
* \include SparseMatrix_coeffs.out
*
* \sa valuePtr(), isCompressed() */
Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
private:
template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};
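// A small interoperability sketch, assuming <Eigen/SparseCore>: in compressed
// (CSC/CSR) mode the three arrays below fully describe the matrix, and
// innerNonZeroPtr() is null.
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(1, 0) = 10.0;
  A.insert(0, 1) = 20.0;
  A.insert(2, 1) = 30.0;
  A.makeCompressed();
  for (int j = 0; j <= A.outerSize(); ++j)
    std::cout << A.outerIndexPtr()[j] << " ";                           // 0 1 3 3
  std::cout << "\n";
  for (int k = 0; k < A.nonZeros(); ++k)
    std::cout << A.innerIndexPtr()[k] << ":" << A.valuePtr()[k] << " "; // 1:10 0:20 2:30
  std::cout << "\n" << A.isCompressed() << "\n";                        // 1
}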
template<typename Derived>
class SparseCompressedBase<Derived>::InnerIterator
{
public:
InnerIterator()
: m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)
{}
InnerIterator(const InnerIterator& other)
: m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)
{}
InnerIterator& operator=(const InnerIterator& other)
{
m_values = other.m_values;
m_indices = other.m_indices;
const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
m_id = other.m_id;
m_end = other.m_end;
return *this;
}
InnerIterator(const SparseCompressedBase& mat, Index outer)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
{
if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
{
m_id = 0;
m_end = mat.nonZeros();
}
else
{
m_id = mat.outerIndexPtr()[outer];
if(mat.isCompressed())
m_end = mat.outerIndexPtr()[outer+1];
else
m_end = m_id + mat.innerNonZeroPtr()[outer];
}
}
explicit InnerIterator(const SparseCompressedBase& mat)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
: m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
inline InnerIterator& operator++() { m_id++; return *this; }
inline const Scalar& value() const { return m_values[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
inline StorageIndex index() const { return m_indices[m_id]; }
inline Index outer() const { return m_outer.value(); }
inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
inline operator bool() const { return (m_id < m_end); }
protected:
const Scalar* m_values;
const StorageIndex* m_indices;
typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
const OuterType m_outer;
Index m_id;
Index m_end;
private:
// If you get here, then you're not using the right InnerIterator type, e.g.:
// SparseMatrix<double,RowMajor> A;
// SparseMatrix<double>::InnerIterator it(A,0);
template<typename T> InnerIterator(const SparseMatrixBase<T>&, Index outer);
};
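// A sketch of the canonical traversal, assuming <Eigen/SparseCore>; as the
// note above warns, the iterator type must match the matrix type exactly.
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double, Eigen::RowMajor> A(2, 3);
  A.insert(0, 2) = 1.5;
  A.insert(1, 1) = 2.5;
  A.makeCompressed();
  for (int i = 0; i < A.outerSize(); ++i)      // outer = row for a row-major matrix
    for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it(A, i); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}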
template<typename Derived>
class SparseCompressedBase<Derived>::ReverseInnerIterator
{
public:
ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
{
if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
{
m_start = 0;
m_id = mat.nonZeros();
}
else
{
m_start = mat.outerIndexPtr()[outer];
if(mat.isCompressed())
m_id = mat.outerIndexPtr()[outer+1];
else
m_id = m_start + mat.innerNonZeroPtr()[outer];
}
}
explicit ReverseInnerIterator(const SparseCompressedBase& mat)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
: m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
inline ReverseInnerIterator& operator--() { --m_id; return *this; }
inline const Scalar& value() const { return m_values[m_id-1]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
inline StorageIndex index() const { return m_indices[m_id-1]; }
inline Index outer() const { return m_outer.value(); }
inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
inline operator bool() const { return (m_id > m_start); }
protected:
const Scalar* m_values;
const StorageIndex* m_indices;
typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
const OuterType m_outer;
Index m_start;
Index m_id;
};
namespace internal {
template<typename Derived>
struct evaluator<SparseCompressedBase<Derived> >
: evaluator_base<Derived>
{
typedef typename Derived::Scalar Scalar;
typedef typename Derived::InnerIterator InnerIterator;
enum {
CoeffReadCost = NumTraits<Scalar>::ReadCost,
Flags = Derived::Flags
};
evaluator() : m_matrix(0), m_zero(0)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_matrix->nonZeros();
}
operator Derived&() { return m_matrix->const_cast_derived(); }
operator const Derived&() const { return *m_matrix; }
typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
const Scalar& coeff(Index row, Index col) const
{
Index p = find(row,col);
if(p==Dynamic)
return m_zero;
else
return m_matrix->const_cast_derived().valuePtr()[p];
}
Scalar& coeffRef(Index row, Index col)
{
Index p = find(row,col);
eigen_assert(p!=Dynamic && "written coefficient does not exist");
return m_matrix->const_cast_derived().valuePtr()[p];
}
protected:
Index find(Index row, Index col) const
{
eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
const Index outer = Derived::IsRowMajor ? row : col;
const Index inner = Derived::IsRowMajor ? col : row;
Index start = m_matrix->outerIndexPtr()[outer];
Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
}
const Derived *m_matrix;
const Scalar m_zero;
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_COMPRESSED_BASE_H
abess | abess-master/python/include/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
#define EIGEN_SPARSE_CWISE_BINARY_OP_H
namespace Eigen {
// Here we have to handle 3 cases:
// 1 - sparse op dense
// 2 - dense op sparse
// 3 - sparse op sparse
// We also need to implement a 4th iterator for:
// 4 - dense op dense
// Finally, we also need to distinguish between the product and other operations :
//                          configuration    returned mode
//  1 - sparse op dense     product          sparse
//                          generic          dense
//  2 - dense op sparse     product          sparse
//                          generic          dense
//  3 - sparse op sparse    product          sparse
//                          generic          sparse
//  4 - dense op dense      product          dense
//                          generic          dense
//
// TODO to ease compiler job, we could specialize product/quotient with a scalar
// and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op.
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
: public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
public:
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
typedef SparseMatrixBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
CwiseBinaryOpImpl()
{
EIGEN_STATIC_ASSERT((
(!internal::is_same<typename internal::traits<Lhs>::StorageKind,
typename internal::traits<Rhs>::StorageKind>::value)
|| ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
}
};
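// A hedged sketch of the cases above, assuming <Eigen/SparseCore> and
// Eigen 3.3 semantics: "+" with a dense operand produces a dense result,
// while cwiseProduct keeps sparsity (see the conjunction evaluators below).
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseMatrix<double> A(2, 2), B(2, 2);
  A.insert(0, 0) = 1.0;
  B.insert(0, 0) = 2.0;
  B.insert(1, 1) = 4.0;
  Eigen::SparseMatrix<double> S = A + B;             // case 3: sparse op sparse -> sparse
  Eigen::Matrix2d D = Eigen::Matrix2d::Ones();
  Eigen::Matrix2d E = A + D;                         // case 1, generic: sparse op dense -> dense
  Eigen::SparseMatrix<double> P = A.cwiseProduct(D); // case 1, product: result stays sparse
  std::cout << S.nonZeros() << " " << E(1, 1) << " " << P.nonZeros() << "\n"; // 2 1 1
}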
namespace internal {
// Generic "sparse OP sparse"
template<typename XprType> struct binary_sparse_evaluator;
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
{
m_id = m_lhsIter.index();
m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
++m_lhsIter;
++m_rhsIter;
}
else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
{
m_id = m_lhsIter.index();
m_value = m_functor(m_lhsIter.value(), Scalar(0));
++m_lhsIter;
}
else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
{
m_id = m_rhsIter.index();
m_value = m_functor(Scalar(0), m_rhsIter.value());
++m_rhsIter;
}
else
{
m_value = 0; // this is to avoid a compilation warning
m_id = -1;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
Flags = XprType::Flags
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
};
// dense op sparse
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_id;
if(m_id<m_innerSize)
{
Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,
IsRowMajor?m_id:m_rhsIter.outer());
if(m_rhsIter && m_rhsIter.index()==m_id)
{
m_value = m_functor(lhsVal, m_rhsIter.value());
++m_rhsIter;
}
else
m_value = m_functor(lhsVal, Scalar(0));
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
protected:
const evaluator<Lhs> &m_lhsEval;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs()),
m_expr(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_expr.size();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
const XprType &m_expr;
};
// sparse op dense
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_id;
if(m_id<m_innerSize)
{
Scalar rhsVal = m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,
IsRowMajor?m_id:m_lhsIter.outer());
if(m_lhsIter && m_lhsIter.index()==m_id)
{
m_value = m_functor(m_lhsIter.value(), rhsVal);
++m_lhsIter;
}
else
m_value = m_functor(Scalar(0),rhsVal);
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
protected:
LhsIterator m_lhsIter;
const evaluator<Rhs> &m_rhsEval;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs()),
m_expr(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_expr.size();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
const XprType &m_expr;
};
template<typename T,
typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_conjunction_evaluator;
// "sparse .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse .* dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse ./ dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IndexBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse && dense"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
{
if (m_lhsIter.index() < m_rhsIter.index())
++m_lhsIter;
else
++m_rhsIter;
}
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_lhsIter;
++m_rhsIter;
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
{
if (m_lhsIter.index() < m_rhsIter.index())
++m_lhsIter;
else
++m_rhsIter;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
Flags = XprType::Flags
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
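// A sketch of the intersection behavior above, assuming <Eigen/SparseCore>:
// the iterator only visits indices present in both operands, which is why
// nonZerosEstimate() is min(nnz(lhs), nnz(rhs)).
#include <Eigen/SparseCore>
#include <iostream>
int main()
{
  Eigen::SparseVector<double> a(5), b(5);
  a.insert(1) = 2.0; a.insert(3) = 3.0;
  b.insert(3) = 4.0; b.insert(4) = 5.0;
  Eigen::SparseVector<double> c = a.cwiseProduct(b);
  std::cout << c.nonZeros() << " " << c.coeff(3) << "\n"; // 1 12
}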
// "dense ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IndexBased, IteratorBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef evaluator<LhsArg> LhsEvaluator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator
{
enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_rhsIter;
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
protected:
const LhsEvaluator &m_lhsEval;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
const Index m_outer;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(RhsArg::Flags)&RowMajorBit)
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_rhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
// "sparse ^ dense"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IndexBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef evaluator<RhsArg> RhsEvaluator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator
{
enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_lhsIter;
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_lhsIter.value(),
m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
protected:
LhsIterator m_lhsIter;
const evaluator<RhsArg> &m_rhsEval;
const BinaryOp& m_functor;
const Index m_outer;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(LhsArg::Flags)&RowMajorBit)
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_lhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
} // end namespace internal
/***************************************************************************
* Implementation of SparseMatrixBase and SparseCwise functions/operators
***************************************************************************/
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
return derived() = derived() - other.derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
{
return derived() = derived() + other.derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
{
call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
{
call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
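// Usage sketch (illustrative only, not part of Eigen): the diagonal overloads
// above enable in-place updates such as adding a regularization term:
//   #include <Eigen/SparseCore>
//   const int n = 4;
//   Eigen::SparseMatrix<double> A(n,n);
//   Eigen::VectorXd d = Eigen::VectorXd::Ones(n);
//   A += d.asDiagonal();   // adds 1 to each diagonal coefficient of A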
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
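// Usage sketch (illustrative only, not part of Eigen): the mixed dense/sparse
// operators above return a dense expression, typically evaluated into a dense
// object:
//   #include <Eigen/Dense>
//   #include <Eigen/SparseCore>
//   Eigen::MatrixXd D = Eigen::MatrixXd::Random(4,4);
//   Eigen::SparseMatrix<double> S(4,4);
//   S.insert(1,2) = 5.0;
//   Eigen::MatrixXd R = D + S;   // dense + sparse -> dense result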
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
| 25,840 | 34.544704 | 184 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
#define EIGEN_SPARSE_CWISE_UNARY_OP_H
namespace Eigen {
namespace internal {
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
{
public:
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const UnaryOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
protected:
const UnaryOp m_functor;
private:
Scalar& valueRef();
};
template<typename ViewOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
{
public:
typedef CwiseUnaryView<ViewOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const ViewOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
protected:
const ViewOp m_functor;
};
} // end namespace internal
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() *= other;
return derived();
}
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() /= other;
return derived();
}
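// Usage sketch (illustrative only, not part of Eigen): the in-place operators
// above touch stored coefficients only, so the sparsity pattern is preserved:
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(8,8);
//   // ... fill A ...
//   A *= 2.0;   // every stored value is doubled
//   A /= 4.0;   // every stored value is halved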
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
| 4,711 | 30.624161 | 107 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseDenseProduct.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
typedef evaluator<Lhs> LhsEval;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
LhsEval lhsEval(lhs);
Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
Eigen::initParallel();
Index threads = Eigen::nbThreads();
#endif
for(Index c=0; c<rhs.cols(); ++c)
{
#ifdef EIGEN_HAS_OPENMP
// This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It roughly represents the minimal amount of work needed for multi-threading to pay off.
if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
{
#pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
else
#endif
{
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
}
}
static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
{
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhsEval,i); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),col);
res.coeffRef(i,col) += alpha * tmp;
}
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
// enum {
// Defined = 1
// };
// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
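// Usage sketch (illustrative only, not part of Eigen): these kernels back the
// generic sparse * dense products; a typical accumulating call looks like:
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(1000,1000);
//   Eigen::VectorXd x = Eigen::VectorXd::Ones(1000);
//   Eigen::VectorXd y = Eigen::VectorXd::Zero(1000);
//   y.noalias() += A * x;   // dispatches to sparse_time_dense_product, alpha = 1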
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
}
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
: generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dst>
static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
// transpose everything
Transpose<Dst> dstT(dst);
internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
: generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
// if the actual left-hand side is a dense vector,
// then build a sparse-view so that we can seamlessly iterate over it.
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1, SparseView<Lhs1> >::type ActualLhs;
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
typedef evaluator<ActualLhs> LhsEval;
typedef evaluator<ActualRhs> RhsEval;
typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
typedef typename ProdXprType::Scalar Scalar;
public:
enum {
Flags = NeedToTranspose ? RowMajorBit : 0,
CoeffReadCost = HugeCost
};
class InnerIterator : public LhsIterator
{
public:
InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
: LhsIterator(xprEval.m_lhsXprImpl, 0),
m_outer(outer),
m_empty(false),
m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
protected:
Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
{
return rhs.coeff(outer);
}
Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
{
typename RhsEval::InnerIterator it(rhs, outer);
if (it && it.index()==0 && it.value()!=Scalar(0))
return it.value();
m_empty = true;
return Scalar(0);
}
Index m_outer;
bool m_empty;
Scalar m_factor;
};
sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
// transpose case
sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
const LhsArg m_lhs;
evaluator<ActualLhs> m_lhsXprImpl;
evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
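// Usage sketch (illustrative only, not part of Eigen): an outer product with a
// sparse vector factor yields a sparse expression handled by the evaluators
// above:
//   #include <Eigen/SparseCore>
//   Eigen::SparseVector<double> s(100);
//   s.insert(3) = 2.0;
//   Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(100, 0.0, 1.0);
//   Eigen::SparseMatrix<double> M = s * d.transpose();   // only row 3 is non-empty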
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
| 12,487 | 37.903427 | 147 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseDiagonalProduct.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
namespace Eigen {
// The product of a diagonal matrix with a sparse matrix can be easily
// implemented using expression templates.
// We have to consider two very different cases:
// 1 - diag * row-major sparse
// => each inner vector <=> scalar * sparse vector product
// => so we can reuse CwiseUnaryOp::InnerIterator
// 2 - diag * col-major sparse
//    => each inner vector <=> dense vector * sparse vector cwise product
// => again, we can reuse specialization of CwiseBinaryOp::InnerIterator
// for that particular case
// The two other cases are symmetric.
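// Usage sketch (illustrative only, not part of Eigen): both cases boil down to
// row or column scaling, and the result remains a sparse expression:
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(5,5);
//   // ... fill A ...
//   Eigen::VectorXd d = Eigen::VectorXd::Constant(5, 2.0);
//   Eigen::SparseMatrix<double> B = d.asDiagonal() * A;   // scales each row of A
//   Eigen::SparseMatrix<double> C = A * d.asDiagonal();   // scales each column of A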
namespace internal {
enum {
SDP_AsScalarProduct,
SDP_AsCwiseProduct
};
template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
struct sparse_diagonal_product_evaluator;
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape>
: public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;
explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape>
: public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;
explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {}
};
template<typename SparseXprType, typename DiagonalCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>
{
protected:
typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
typedef typename SparseXprType::Scalar Scalar;
public:
class InnerIterator : public SparseXprInnerIterator
{
public:
InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
: SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),
m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))
{}
EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }
protected:
typename DiagonalCoeffType::Scalar m_coeff;
};
sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)
: m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)
{}
Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); }
protected:
evaluator<SparseXprType> m_sparseXprImpl;
evaluator<DiagonalCoeffType> m_diagCoeffImpl;
};
template<typename SparseXprType, typename DiagCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
{
typedef typename SparseXprType::Scalar Scalar;
typedef typename SparseXprType::StorageIndex StorageIndex;
typedef typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime
: SparseXprType::ColsAtCompileTime>::type DiagCoeffNested;
class InnerIterator
{
typedef typename evaluator<SparseXprType>::InnerIterator SparseXprIter;
public:
InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
: m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested)
{}
inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); }
inline StorageIndex index() const { return m_sparseIter.index(); }
inline Index outer() const { return m_sparseIter.outer(); }
inline Index col() const { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); }
inline Index row() const { return SparseXprType::IsRowMajor ? m_sparseIter.outer() : m_sparseIter.index(); }
EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; }
inline operator bool() const { return m_sparseIter; }
protected:
SparseXprIter m_sparseIter;
DiagCoeffNested m_diagCoeffNested;
};
sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
: m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff)
{}
Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); }
protected:
evaluator<SparseXprType> m_sparseXprEval;
DiagCoeffNested m_diagCoeffNested;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
| 5,808 | 40.791367 | 168 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseDot.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_DOT_H
#define EIGEN_SPARSE_DOT_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
  eigen_assert(other.size()>0 && "you are using a non-initialized vector");
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
Scalar res(0);
while (i)
{
res += numext::conj(i.value()) * other.coeff(i.index());
++i;
}
return res;
}
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
internal::evaluator<OtherDerived> otherEval(other.derived());
typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);
Scalar res(0);
while (i && j)
{
if (i.index()==j.index())
{
res += numext::conj(i.value()) * j.value();
++i; ++j;
}
else if (i.index()<j.index())
++i;
else
++j;
}
return res;
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
return numext::real((*this).cwiseAbs2().sum());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
using std::sqrt;
return sqrt(squaredNorm());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::blueNorm() const
{
return internal::blueNorm_impl(*this);
}
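// Usage sketch (illustrative only, not part of Eigen): dot() accepts a dense or
// a sparse right-hand side; both operands must be vectors of the same size:
//   #include <Eigen/SparseCore>
//   Eigen::SparseVector<double> u(4), v(4);
//   u.insert(1) = 3.0;
//   v.insert(1) = 2.0;  v.insert(2) = 5.0;
//   double d = u.dot(v);   // 6.0: only the shared index 1 contributes
//   double n = u.norm();   // 3.0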
} // end namespace Eigen
#endif // EIGEN_SPARSE_DOT_H
| 3,080 | 30.121212 | 118 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseFuzzy.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_FUZZY_H
#define EIGEN_SPARSE_FUZZY_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
{
const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
const PlainObject>::type actualB(other.derived());
return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
}
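// Usage sketch (illustrative only, not part of Eigen): the comparison above is
// relative, i.e. ||A-B||^2 <= prec^2 * min(||A||^2, ||B||^2):
//   #include <Eigen/SparseCore>
//   Eigen::SparseMatrix<double> A(3,3), B(3,3);
//   A.insert(0,0) = 1.0;
//   B.insert(0,0) = 1.0 + 1e-14;
//   bool close = A.isApprox(B);   // true under the default precision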
} // end namespace Eigen
#endif // EIGEN_SPARSE_FUZZY_H
| 1,107 | 35.933333 | 119 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseMap.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_MAP_H
#define EIGEN_SPARSE_MAP_H
namespace Eigen {
namespace internal {
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef traits<PlainObjectType> TraitsBase;
enum {
Flags = TraitsBase::Flags & (~NestByRefBit)
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef traits<PlainObjectType> TraitsBase;
enum {
Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))
};
};
} // end namespace internal
template<typename Derived,
int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class SparseMapBase;
/** \ingroup SparseCore_Module
 * \class SparseMapBase
 * \brief Common base class for Map and Ref instances of sparse matrices and vectors.
*/
template<typename Derived>
class SparseMapBase<Derived,ReadOnlyAccessors>
: public SparseCompressedBase<Derived>
{
public:
typedef SparseCompressedBase<Derived> Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
enum { IsRowMajor = Base::IsRowMajor };
using Base::operator=;
protected:
typedef typename internal::conditional<
bool(internal::is_lvalue<Derived>::value),
Scalar *, const Scalar *>::type ScalarPointer;
typedef typename internal::conditional<
bool(internal::is_lvalue<Derived>::value),
StorageIndex *, const StorageIndex *>::type IndexPointer;
Index m_outerSize;
Index m_innerSize;
Array<StorageIndex,2,1> m_zero_nnz;
IndexPointer m_outerIndex;
IndexPointer m_innerIndices;
ScalarPointer m_values;
IndexPointer m_innerNonZeros;
public:
/** \copydoc SparseMatrixBase::rows() */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \copydoc SparseMatrixBase::cols() */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
/** \copydoc SparseMatrixBase::innerSize() */
inline Index innerSize() const { return m_innerSize; }
/** \copydoc SparseMatrixBase::outerSize() */
inline Index outerSize() const { return m_outerSize; }
/** \copydoc SparseCompressedBase::nonZeros */
inline Index nonZeros() const { return m_zero_nnz[1]; }
/** \copydoc SparseCompressedBase::isCompressed */
bool isCompressed() const { return m_innerNonZeros==0; }
//----------------------------------------
// direct access interface
/** \copydoc SparseMatrix::valuePtr */
inline const Scalar* valuePtr() const { return m_values; }
/** \copydoc SparseMatrix::innerIndexPtr */
inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
/** \copydoc SparseMatrix::outerIndexPtr */
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \copydoc SparseMatrix::innerNonZeroPtr */
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
//----------------------------------------
/** \copydoc SparseMatrix::coeff */
inline Scalar coeff(Index row, Index col) const
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = m_outerIndex[outer];
Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];
if (start==end)
return Scalar(0);
else if (end>0 && inner==m_innerIndices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
const Index id = r-&m_innerIndices[0];
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(outerIndexPtr),
m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)
{}
// for vectors
inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)
: m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(m_zero_nnz.data()),
m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0)
{}
/** Empty destructor */
inline ~SparseMapBase() {}
protected:
inline SparseMapBase() {}
};
/** \ingroup SparseCore_Module
 * \class SparseMapBase
 * \brief Common base class for writable Map and Ref instances of sparse matrices and vectors.
*/
template<typename Derived>
class SparseMapBase<Derived,WriteAccessors>
: public SparseMapBase<Derived,ReadOnlyAccessors>
{
typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
public:
typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
enum { IsRowMajor = Base::IsRowMajor };
using Base::operator=;
public:
//----------------------------------------
// direct access interface
using Base::valuePtr;
using Base::innerIndexPtr;
using Base::outerIndexPtr;
using Base::innerNonZeroPtr;
/** \copydoc SparseMatrix::valuePtr */
inline Scalar* valuePtr() { return Base::m_values; }
/** \copydoc SparseMatrix::innerIndexPtr */
inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; }
/** \copydoc SparseMatrix::outerIndexPtr */
inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; }
/** \copydoc SparseMatrix::innerNonZeroPtr */
inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }
//----------------------------------------
/** \copydoc SparseMatrix::coeffRef */
inline Scalar& coeffRef(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = Base::m_outerIndex[outer];
Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
const Index id = r - &Base::m_innerIndices[0];
eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return const_cast<Scalar*>(Base::m_values)[id];
}
inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,
Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
// for vectors
inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)
: Base(size, nnz, innerIndexPtr, valuePtr)
{}
/** Empty destructor */
inline ~SparseMapBase() {}
protected:
inline SparseMapBase() {}
};
/** \ingroup SparseCore_Module
*
* \brief Specialization of class Map for SparseMatrix-like storage.
*
 * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data; it must be a template instance of class SparseMatrix.
*
* \sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
#else
template<typename SparseMatrixType>
class Map<SparseMatrixType>
: public SparseMapBase<Derived,WriteAccessors>
#endif
{
public:
typedef SparseMapBase<Map> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
enum { IsRowMajor = Base::IsRowMajor };
public:
/** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
* stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
* If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
*
* This constructor is available only if \c SparseMatrixType is non-const.
*
* More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
*/
inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Empty destructor */
inline ~Map() {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
public:
typedef SparseMapBase<Map> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
enum { IsRowMajor = Base::IsRowMajor };
public:
#endif
/** This is the const version of the above constructor.
*
* This constructor is available only if \c SparseMatrixType is const, e.g.:
* \code Map<const SparseMatrix<double> > \endcode
*/
inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,
const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
/** Empty destructor */
inline ~Map() {}
};
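// Usage sketch (illustrative only, not part of Eigen): wrapping raw compressed
// column-major arrays without copying them:
//   #include <Eigen/SparseCore>
//   // 3x3 matrix with non-zeros (0,0)=1, (2,1)=2, (1,2)=3 in compressed storage:
//   int    outer[4] = {0, 1, 2, 3};
//   int    inner[3] = {0, 2, 1};
//   double vals[3]  = {1.0, 2.0, 3.0};
//   Eigen::Map<const Eigen::SparseMatrix<double> > M(3, 3, 3, outer, inner, vals);
//   double x = M.coeff(2,1);   // 2.0; the buffers are referenced, not copied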
namespace internal {
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_MAP_H
| 12,589 | 40.143791 | 164 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \class SparseMatrix
*
 * \brief A versatile sparse matrix representation
 *
 * This class implements a more versatile variant of the common \em compressed row/column storage format.
 * Each column's (resp. row's) non-zeros are stored as pairs of value and associated row (resp. column) index.
 * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
 * space in between the non-zeros of two successive columns (resp. rows) so that inserting a new non-zero
 * can be done with limited memory reallocation and copies.
 *
 * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
 * compatible with many libraries.
 *
 * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
* is ColMajor or RowMajor. The default is 0 which means column-major.
* \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
*
* \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
* whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
 * Code making use of \c SparseMatrix::Index thus likely has to be changed to use \c SparseMatrix::StorageIndex instead.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef _Scalar Scalar;
typedef Dense StorageKind;
typedef _StorageIndex StorageIndex;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = 1,
Flags = LvalueBit
};
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
: public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
enum {
Flags = 0
};
};
} // end namespace internal
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
: public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
public:
using Base::isCompressed;
using Base::nonZeros;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
using Base::operator+=;
using Base::operator-=;
typedef MappedSparseMatrix<Scalar,Flags> Map;
typedef Diagonal<SparseMatrix> DiagonalReturnType;
typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
using Base::IsRowMajor;
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
};
typedef typename Base::IndexVector IndexVector;
typedef typename Base::ScalarVector ScalarVector;
protected:
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
Index m_outerSize;
Index m_innerSize;
StorageIndex* m_outerIndex;
StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
public:
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline Scalar* valuePtr() { return m_data.valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
inline const Storage& data() const { return m_data; }
/** \returns the value of the matrix at position \a i, \a j
* This function returns Scalar(0) if the element is an explicit \em zero */
inline Scalar coeff(Index row, Index col) const
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
}
/** \returns a non-const reference to the value of the matrix at position \a i, \a j
*
* If the element does not exist then it is inserted via the insert(Index,Index) function
* which itself turns the matrix into a non compressed form if that was not the case.
*
* This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
* function if the element does not already exist.
*/
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = m_outerIndex[outer];
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start)
return insert(row,col);
const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner))
return m_data.value(p);
else
return insert(row,col);
}
    /** \returns a reference to a newly inserted non-zero coefficient with coordinates \a row x \a col.
      * The non-zero coefficient must \b not already exist.
*
* If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
* mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
* In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
* inserted by increasing outer-indices.
*
* If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
* call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
*
* Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
* if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
Scalar& insert(Index row, Index col);
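// Usage sketch (illustrative only, not part of Eigen): for arbitrary insertion
// orders, the triplet-list assembly recommended above is the safest route:
//   #include <Eigen/SparseCore>
//   #include <vector>
//   std::vector<Eigen::Triplet<double> > trips;
//   trips.push_back(Eigen::Triplet<double>(2, 0, 4.0));
//   trips.push_back(Eigen::Triplet<double>(0, 1, -1.0));
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(trips.begin(), trips.end());   // sorts, sums duplicates, compresses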
public:
    /** Removes all non-zeros but keeps the allocated memory.
      *
      * This function does not free the currently allocated memory. To release as much memory as possible,
* call \code mat.data().squeeze(); \endcode after resizing it.
*
* \sa resize(Index,Index), data()
*/
inline void setZero()
{
m_data.clear();
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
/** Preallocates \a reserveSize non zeros.
*
* Precondition: the matrix must be in compressed mode. */
inline void reserve(Index reserveSize)
{
eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
m_data.reserve(reserveSize);
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
*
* This function turns the matrix in non-compressed mode.
*
* The type \c SizesType must expose the following interface:
\code
typedef value_type;
const value_type& operator[](i) const;
\endcode
* for \c i in the [0,this->outerSize()[ range.
* Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
*/
template<class SizesType>
inline void reserve(const SizesType& reserveSizes);
#else
template<class SizesType>
inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
#if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
template<class SizesType>
inline void reserveInnerVectors(const SizesType& reserveSizes)
{
if(isCompressed())
{
Index totalReserveSize = 0;
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
      // temporarily use m_innerNonZeros to hold the new starting points.
StorageIndex* newOuterIndex = m_innerNonZeros;
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
for(Index j=m_outerSize-1; j>=0; --j)
{
StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
}
previousOuterIndex = m_outerIndex[j];
m_outerIndex[j] = newOuterIndex[j];
m_innerNonZeros[j] = innerNNZ;
}
m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
m_data.resize(m_outerIndex[m_outerSize]);
}
else
{
StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
m_data.resize(count);
for(Index j=m_outerSize-1; j>=0; --j)
{
Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
StorageIndex innerNNZ = m_innerNonZeros[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
}
}
}
std::swap(m_outerIndex, newOuterIndex);
std::free(newOuterIndex);
}
}
public:
//--- low level purely coherent filling ---
/** \internal
* \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
* - the nonzero does not already exist
* - the new coefficient is the last one according to the storage order
*
    * Before filling a given inner vector you must call the startVec(Index) function.
*
* After an insertion session, you should call the finalize() function.
*
* \sa insert, insertBackByOuterInner, startVec */
inline Scalar& insertBack(Index row, Index col)
{
return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
}
/** \internal
* \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(Scalar(0), inner);
return m_data.value(p);
}
/** \internal
* \warning use it only if you know what you are doing */
inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
{
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(Scalar(0), inner);
return m_data.value(p);
}
/** \internal
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
/** \internal
* Must be called after inserting a set of non zero entries using the low level compressed API.
*/
inline void finalize()
{
if(isCompressed())
{
StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
Index i = m_outerSize;
      // find the last filled inner vector
while (i>=0 && m_outerIndex[i]==0)
--i;
++i;
while (i<=m_outerSize)
{
m_outerIndex[i] = size;
++i;
}
}
}
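    // Editor's sketch of the low-level compressed filling protocol documented above
    // (illustrative only; assumes a properly sized column-major matrix m):
    //   for(Index j=0; j<m.outerSize(); ++j)
    //   {
    //     m.startVec(j);
    //     // append the entries of column j with strictly increasing row indices i:
    //     m.insertBack(i, j) = v_ij;
    //   }
    //   m.finalize();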
//---
template<typename InputIterators>
void setFromTriplets(const InputIterators& begin, const InputIterators& end);
template<typename InputIterators,typename DupFunctor>
void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
template<typename DupFunctor>
void collapseDuplicates(DupFunctor dup_func = DupFunctor());
//---
/** \internal
* same as insert(Index,Index) except that the indices are given relative to the storage order */
Scalar& insertByOuterInner(Index j, Index i)
{
return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
}
/** Turns the matrix into the \em compressed format.
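      * A typical sequence (editor's sketch):
      * \code
      * mat.insert(i, j) = v;  // random insertions may switch mat to uncompressed mode
      * mat.makeCompressed();  // pack the entries back into plain CSC/CSR storage
      * \endcode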
*/
void makeCompressed()
{
if(isCompressed())
return;
eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
Index oldStart = m_outerIndex[1];
m_outerIndex[1] = m_innerNonZeros[0];
for(Index j=1; j<m_outerSize; ++j)
{
Index nextOldStart = m_outerIndex[j+1];
Index offset = oldStart - m_outerIndex[j];
if(offset>0)
{
for(Index k=0; k<m_innerNonZeros[j]; ++k)
{
m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
}
}
m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
oldStart = nextOldStart;
}
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
m_data.squeeze();
}
/** Turns the matrix into the uncompressed mode */
void uncompress()
{
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
}
/** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
* The functor type \a KeepFunc must implement the following function:
* \code
* bool operator() (const Index& row, const Index& col, const Scalar& value) const;
* \endcode
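    * For instance, with C++11, one can keep the strictly lower triangular part only
    * (editor's sketch, assuming \c Scalar is \c double):
    * \code
    * mat.prune([](const Index& row, const Index& col, const double&) { return row>col; });
    * \endcode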
* \sa prune(Scalar,RealScalar)
*/
template<typename KeepFunc>
void prune(const KeepFunc& keep = KeepFunc())
{
// TODO optimize the uncompressed mode to avoid moving and allocating the data twice
makeCompressed();
StorageIndex k = 0;
for(Index j=0; j<m_outerSize; ++j)
{
Index previousStart = m_outerIndex[j];
m_outerIndex[j] = k;
Index end = m_outerIndex[j+1];
for(Index i=previousStart; i<end; ++i)
{
if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
{
m_data.value(k) = m_data.value(i);
m_data.index(k) = m_data.index(i);
++k;
}
}
}
m_outerIndex[m_outerSize] = k;
m_data.resize(k,0);
}
/** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
*
    * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
    * and the storage of the out-of-bounds coefficients is kept and reserved.
* Call makeCompressed() to pack the entries and squeeze extra memory.
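    * For example (editor's sketch):
    * \code
    * SparseMatrix<double> mat(100,100);
    * // ... fill mat ...
    * mat.conservativeResize(120, 80); // grow the rows, shrink the columns, keep stored values
    * mat.makeCompressed();            // optionally repack the storage
    * \endcode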
*
* \sa reserve(), setZero(), makeCompressed()
*/
void conservativeResize(Index rows, Index cols)
{
// No change
if (this->rows() == rows && this->cols() == cols) return;
// If one dimension is null, then there is nothing to be preserved
if(rows==0 || cols==0) return resize(rows,cols);
Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
// Deals with inner non zeros
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
m_innerNonZeros[i] = 0;
}
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index i = 0; i < m_outerSize; i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
// Change the m_innerNonZeros in case of a decrease of inner size
if (m_innerNonZeros && innerChange < 0)
{
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
{
StorageIndex &n = m_innerNonZeros[i];
StorageIndex start = m_outerIndex[i];
while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
}
}
m_innerSize = newInnerSize;
// Re-allocate outer index structure if necessary
if (outerChange == 0)
return;
StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
m_outerIndex[i] = last;
}
m_outerSize += outerChange;
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
*
    * This function does not free the currently allocated memory. To release as much memory as possible,
* call \code mat.data().squeeze(); \endcode after resizing it.
*
* \sa reserve(), setZero()
*/
void resize(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
std::free(m_outerIndex);
m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
if (!m_outerIndex) internal::throw_std_bad_alloc();
m_outerSize = outerSize;
}
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
}
/** \internal
* Resize the nonzero vector to \a size */
void resizeNonZeros(Index size)
{
m_data.resize(size);
}
/** \returns a const expression of the diagonal coefficients. */
const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
/** \returns a read-write expression of the diagonal coefficients.
* \warning If the diagonal entries are written, then all diagonal
* entries \b must already exist, otherwise an assertion will be raised.
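    * Editor's sketch of a safe write pattern (illustrative only):
    * \code
    * for(Index i=0; i<mat.rows(); ++i)
    *   mat.coeffRef(i,i);                                  // inserts missing (i,i) entries
    * mat.diagonal() = VectorXd::Constant(mat.rows(), 1.0); // now all diagonal entries exist
    * \endcode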
*/
DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
/** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
inline SparseMatrix()
: m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(0, 0);
}
/** Constructs a \a rows \c x \a cols empty matrix */
inline SparseMatrix(Index rows, Index cols)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(rows, cols);
}
/** Constructs a sparse matrix from the sparse expression \a other */
template<typename OtherDerived>
inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
*this = other.derived();
else
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
internal::call_assignment_no_alias(*this, other.derived());
}
}
/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
template<typename OtherDerived, unsigned int UpLo>
inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
Base::operator=(other);
}
/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}
/** \brief Copy constructor with in-place evaluation */
template<typename OtherDerived>
SparseMatrix(const ReturnByValue<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
initAssignment(other);
other.evalTo(*this);
}
/** \brief Copy constructor with in-place evaluation */
template<typename OtherDerived>
explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
inline void swap(SparseMatrix& other)
{
//EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
std::swap(m_outerIndex, other.m_outerIndex);
std::swap(m_innerSize, other.m_innerSize);
std::swap(m_outerSize, other.m_outerSize);
std::swap(m_innerNonZeros, other.m_innerNonZeros);
m_data.swap(other.m_data);
}
/** Sets *this to the identity matrix.
    * This function also turns the matrix into compressed mode, and drops any reserved memory. */
inline void setIdentity()
{
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
this->m_data.resize(rows());
Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
inline SparseMatrix& operator=(const SparseMatrix& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else if(this!=&other)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
initAssignment(other);
if(other.isCompressed())
{
internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
m_data = other.m_data;
}
else
{
Base::operator=(other);
}
}
return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
EIGEN_DBG_SPARSE(
s << "Nonzero entries:\n";
if(m.isCompressed())
{
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
}
else
{
for (Index i=0; i<m.outerSize(); ++i)
{
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k) {
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
}
for (; k<m.m_outerIndex[i+1]; ++k) {
s << "(_,_) ";
}
}
}
s << std::endl;
s << std::endl;
s << "Outer pointers:\n";
for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_outerIndex[i] << " ";
}
s << " $" << std::endl;
if(!m.isCompressed())
{
s << "Inner non zeros:\n";
for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_innerNonZeros[i] << " ";
}
s << " $" << std::endl;
}
s << std::endl;
);
s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
return s;
}
/** Destructor */
inline ~SparseMatrix()
{
std::free(m_outerIndex);
std::free(m_innerNonZeros);
}
/** Overloaded for performance */
Scalar sum() const;
# ifdef EIGEN_SPARSEMATRIX_PLUGIN
# include EIGEN_SPARSEMATRIX_PLUGIN
# endif
protected:
template<typename Other>
void initAssignment(const Other& other)
{
resize(other.rows(), other.cols());
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
}
/** \internal
* \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
/** \internal
    * A vector object that is equal to 0 everywhere except for the value \a v at position \a i */
class SingletonVector
{
StorageIndex m_index;
StorageIndex m_value;
public:
typedef StorageIndex value_type;
SingletonVector(Index i, Index v)
: m_index(convert_index(i)), m_value(convert_index(v))
{}
StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
};
/** \internal
* \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
public:
/** \internal
* \sa insert(Index,Index) */
EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
eigen_assert(!isCompressed());
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = convert_index(inner);
return (m_data.value(p) = 0);
}
private:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
struct default_prunning_func {
default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
inline bool operator() (const Index&, const Index&, const Scalar& value) const
{
return !internal::isMuchSmallerThan(value, reference, epsilon);
}
Scalar reference;
RealScalar epsilon;
};
};
namespace internal {
template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
if(begin!=end)
{
// pass 1: count the nnz per inner-vector
typename SparseMatrixType::IndexVector wi(trMat.outerSize());
wi.setZero();
for(InputIterator it(begin); it!=end; ++it)
{
eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
wi(IsRowMajor ? it->col() : it->row())++;
}
// pass 2: insert all the elements into trMat
trMat.reserve(wi);
for(InputIterator it(begin); it!=end; ++it)
trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
// pass 3:
trMat.collapseDuplicates(dup_func);
}
// pass 4: transposed copy -> implicit sorting
mat = trMat;
}
}
/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is an \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this are destroyed.
* The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
* or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
*
* The \a InputIterators value_type must provide the following interface:
* \code
* Scalar value() const; // the value
 * StorageIndex row() const;   // the row index i
 * StorageIndex col() const;   // the column index j
* \endcode
* See for instance the Eigen::Triplet template class.
*
* Here is a typical usage example:
* \code
typedef Triplet<double> T;
std::vector<T> tripletList;
  tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
* \endcode
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
* \code
* value = dup_func(OldValue, NewValue)
* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
IndexVector wi(innerSize());
wi.fill(-1);
StorageIndex count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
for(Index j=0; j<outerSize(); ++j)
{
StorageIndex start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
{
Index i = m_data.index(k);
if(wi(i)>=start)
{
          // we have already met this entry => accumulate it
m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
}
else
{
m_data.value(count) = m_data.value(k);
m_data.index(count) = m_data.index(k);
wi(i) = count;
++count;
}
}
m_outerIndex[j] = start;
}
m_outerIndex[m_outerSize] = count;
// turn the matrix into compressed form
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
{
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
// two passes algorithm:
// 1 - compute the number of coeffs per dest inner vector
// 2 - do the actual copy/eval
// Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
typedef internal::evaluator<_OtherCopy> OtherCopyEval;
OtherCopy otherCopy(other.derived());
OtherCopyEval otherCopyEval(otherCopy);
SparseMatrix dest(other.rows(),other.cols());
Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j=0; j<otherCopy.outerSize(); ++j)
for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
++dest.m_outerIndex[it.index()];
// prefix sum
StorageIndex count = 0;
IndexVector positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
StorageIndex tmp = dest.m_outerIndex[j];
dest.m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
}
dest.m_outerIndex[dest.outerSize()] = count;
// alloc
dest.m_data.resize(count);
// pass 2
for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
{
for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.m_data.index(pos) = j;
dest.m_data.value(pos) = it.value();
}
}
this->swap(dest);
return *this;
}
else
{
if(other.isRValue())
{
initAssignment(other.derived());
}
// there is no special optimization
return Base::operator=(other.derived());
}
}
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
if(isCompressed())
{
if(nonZeros()==0)
{
// reserve space if not already done
if(m_data.allocatedSize()==0)
m_data.reserve(2*m_innerSize);
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
// pack all inner-vectors to the end of the pre-allocated space
// and allocate the entire free-space to the first inner-vector
StorageIndex end = convert_index(m_data.allocatedSize());
for(Index j=1; j<=m_outerSize; ++j)
m_outerIndex[j] = end;
}
else
{
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index j=0; j<m_outerSize; ++j)
m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
}
}
// check whether we can do a fast "push back" insertion
Index data_end = m_data.allocatedSize();
// First case: we are filling a new inner vector which is packed at the end.
// We assume that all remaining inner-vectors are also empty and packed to the end.
if(m_outerIndex[outer]==data_end)
{
eigen_internal_assert(m_innerNonZeros[outer]==0);
// pack previous empty inner-vectors to end of the used-space
// and allocate the entire free-space to the current inner-vector.
StorageIndex p = convert_index(m_data.size());
Index j = outer;
while(j>=0 && m_innerNonZeros[j]==0)
m_outerIndex[j--] = p;
// push back the new element
++m_innerNonZeros[outer];
m_data.append(Scalar(0), inner);
// check for reallocation
if(data_end != m_data.allocatedSize())
{
// m_data has been reallocated
// -> move remaining inner-vectors back to the end of the free-space
// so that the entire free-space is allocated to the current inner-vector.
eigen_internal_assert(data_end < m_data.allocatedSize());
StorageIndex new_end = convert_index(m_data.allocatedSize());
for(Index k=outer+1; k<=m_outerSize; ++k)
if(m_outerIndex[k]==data_end)
m_outerIndex[k] = new_end;
}
return m_data.value(p);
}
// Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used-space.
if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
{
eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
// add space for the new element
++m_innerNonZeros[outer];
m_data.resize(m_data.size()+1);
// check for reallocation
if(data_end != m_data.allocatedSize())
{
// m_data has been reallocated
// -> move remaining inner-vectors back to the end of the free-space
// so that the entire free-space is allocated to the current inner-vector.
eigen_internal_assert(data_end < m_data.allocatedSize());
StorageIndex new_end = convert_index(m_data.allocatedSize());
for(Index k=outer+1; k<=m_outerSize; ++k)
if(m_outerIndex[k]==data_end)
m_outerIndex[k] = new_end;
}
// and insert it at the right position (sorted insertion)
Index startId = m_outerIndex[outer];
Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
m_data.index(p) = convert_index(inner);
return (m_data.value(p) = 0);
}
if(m_data.size() != m_data.allocatedSize())
{
    // make sure the matrix is compatible with random uncompressed insertion:
m_data.resize(m_data.allocatedSize());
this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
}
return insertUncompressed(row,col);
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
eigen_assert(!isCompressed());
const Index outer = IsRowMajor ? row : col;
const StorageIndex inner = convert_index(IsRowMajor ? col : row);
Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
StorageIndex innerNNZ = m_innerNonZeros[outer];
if(innerNNZ>=room)
{
// this inner vector is full, we need to reallocate the whole buffer :(
reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
}
Index startId = m_outerIndex[outer];
Index p = startId + m_innerNonZeros[outer];
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
return (m_data.value(p) = 0);
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
eigen_assert(isCompressed());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index previousOuter = outer;
if (m_outerIndex[outer+1]==0)
{
// we start a new inner vector
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
{
m_outerIndex[previousOuter] = convert_index(m_data.size());
--previousOuter;
}
m_outerIndex[outer+1] = m_outerIndex[outer];
}
// here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
// the 2nd inner vector...
bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
&& (std::size_t(m_outerIndex[outer+1]) == m_data.size());
std::size_t startId = m_outerIndex[outer];
// FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
std::size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
double reallocRatio = 1;
if (m_data.allocatedSize()<=m_data.size())
{
// if there is no preallocated memory, let's reserve a minimum of 32 elements
if (m_data.size()==0)
{
m_data.reserve(32);
}
else
{
// we need to reallocate the data, to reduce multiple reallocations
// we use a smart resize algorithm based on the current filling ratio
// in addition, we use double to avoid integers overflows
double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
}
}
m_data.resize(m_data.size()+1,reallocRatio);
if (!isLastVec)
{
if (previousOuter==-1)
{
// oops wrong guess.
// let's correct the outer offsets
for (Index k=0; k<=(outer+1); ++k)
m_outerIndex[k] = 0;
Index k=outer+1;
while(m_outerIndex[k]==0)
m_outerIndex[k++] = 1;
while (k<=m_outerSize && m_outerIndex[k]!=0)
m_outerIndex[k++]++;
p = 0;
--k;
k = m_outerIndex[k]-1;
while (k>0)
{
m_data.index(k) = m_data.index(k-1);
m_data.value(k) = m_data.value(k-1);
k--;
}
}
else
{
// we are not inserting into the last inner vec
// update outer indices:
Index j = outer+2;
while (j<=m_outerSize && m_outerIndex[j]!=0)
m_outerIndex[j++]++;
--j;
// shift data of last vecs:
Index k = m_outerIndex[j]-1;
while (k>=Index(p))
{
m_data.index(k) = m_data.index(k-1);
m_data.value(k) = m_data.value(k-1);
k--;
}
}
}
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
m_data.index(p) = inner;
return (m_data.value(p) = 0);
}
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
evaluator() : Base() {}
explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H
| 52,349 | 36.286325 | 167 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseMatrixBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \class SparseMatrixBase
*
* \brief Base class of any sparse matrices or sparse expressions
*
* \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
*/
template<typename Derived> class SparseMatrixBase
: public EigenBase<Derived>
{
public:
typedef typename internal::traits<Derived>::Scalar Scalar;
    /** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc.
*
* It is an alias for the Scalar type */
typedef Scalar value_type;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
/** The integer type used to \b store indices within a SparseMatrix.
      * For a \c SparseMatrix<Scalar,Options,IndexType> it is an alias of the third template parameter \c IndexType. */
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::add_const_on_value_type_if_arithmetic<
typename internal::packet_traits<Scalar>::type
>::type PacketReturnType;
typedef SparseMatrixBase StorageBaseType;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
template<typename OtherDerived>
Derived& operator=(const EigenBase<OtherDerived> &other);
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
/**< The number of rows at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
/**< The number of columns at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
/**< This is set to true if either the number of rows or the number of
* columns is known at compile-time to be equal to 1. Indeed, in that case,
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
*/
IsRowMajor = Flags&RowMajorBit ? 1 : 0,
InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
: int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
#ifndef EIGEN_PARSED_BY_DOXYGEN
_HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
#endif
};
/** \internal the return type of MatrixBase::adjoint() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
Transpose<const Derived>
>::type AdjointReturnType;
typedef Transpose<Derived> TransposeReturnType;
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
// FIXME storage order do not match evaluator storage order
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
* (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
* \a Scalar is \a std::complex<T> then RealScalar is \a T.
*
* \sa class NumTraits
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
/** \internal the return type of coeff()
*/
typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
/** type of the equivalent dense matrix */
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
/** type of the equivalent square matrix */
typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
inline Derived& derived() { return *static_cast<Derived*>(this); }
inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
typedef EigenBase<Derived> Base;
#endif // not EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
#ifdef EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /** <p>This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() </p> */
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** <p> \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#else
#define EIGEN_DOC_UNARY_ADDONS(X,Y)
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
#endif
# include "../plugins/CommonCwiseUnaryOps.h"
# include "../plugins/CommonCwiseBinaryOps.h"
# include "../plugins/MatrixCwiseUnaryOps.h"
# include "../plugins/MatrixCwiseBinaryOps.h"
# include "../plugins/BlockMethods.h"
# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
# include EIGEN_SPARSEMATRIXBASE_PLUGIN
# endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_UNARY_ADDONS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows() */
inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(). */
inline Index size() const { return rows() * cols(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
* \sa rows(), cols(), IsVectorAtCompileTime. */
inline bool isVector() const { return rows()==1 || cols()==1; }
/** \returns the size of the storage major dimension,
      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
/** \returns the size of the inner dimension according to the storage order,
      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
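    // Editor's sketch (illustrative only):
    //   SparseMatrix<double,ColMajor> m(3,5);
    //   m.outerSize(); // 5: the number of columns
    //   m.innerSize(); // 3: the number of rows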
bool isRValue() const { return m_isRValue; }
Derived& markAsRValue() { m_isRValue = true; return derived(); }
SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);
template<typename OtherDerived>
inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);
inline Derived& operator=(const Derived& other);
protected:
template<typename OtherDerived>
inline Derived& assign(const OtherDerived& other);
template<typename OtherDerived>
inline void assignGeneric(const OtherDerived& other);
public:
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
typedef typename Derived::Nested Nested;
typedef typename internal::remove_all<Nested>::type NestedCleaned;
if (Flags&RowMajorBit)
{
Nested nm(m.derived());
internal::evaluator<NestedCleaned> thisEval(nm);
for (Index row=0; row<nm.outerSize(); ++row)
{
Index col = 0;
for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, row); it; ++it)
{
for ( ; col<it.index(); ++col)
s << "0 ";
s << it.value() << " ";
++col;
}
for ( ; col<m.cols(); ++col)
s << "0 ";
s << std::endl;
}
}
else
{
Nested nm(m.derived());
internal::evaluator<NestedCleaned> thisEval(nm);
if (m.cols() == 1) {
Index row = 0;
for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, 0); it; ++it)
{
for ( ; row<it.index(); ++row)
s << "0" << std::endl;
s << it.value() << std::endl;
++row;
}
for ( ; row<m.rows(); ++row)
s << "0" << std::endl;
}
else
{
SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);
}
}
return s;
}
template<typename OtherDerived>
Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator+=(const DiagonalBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator-=(const DiagonalBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator+=(const EigenBase<OtherDerived> &other);
template<typename OtherDerived>
Derived& operator-=(const EigenBase<OtherDerived> &other);
Derived& operator*=(const Scalar& other);
Derived& operator/=(const Scalar& other);
template<typename OtherDerived> struct CwiseProductDenseReturnType {
typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
typename internal::traits<Derived>::Scalar,
typename internal::traits<OtherDerived>::Scalar
>::ReturnType>,
const Derived,
const OtherDerived
> Type;
};
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
cwiseProduct(const MatrixBase<OtherDerived> &other) const;
// sparse * diagonal
template<typename OtherDerived>
const Product<Derived,OtherDerived>
operator*(const DiagonalBase<OtherDerived> &other) const
{ return Product<Derived,OtherDerived>(derived(), other.derived()); }
// diagonal * sparse
template<typename OtherDerived> friend
const Product<OtherDerived,Derived>
operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
{ return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
// sparse * sparse
template<typename OtherDerived>
const Product<Derived,OtherDerived,AliasFreeProduct>
operator*(const SparseMatrixBase<OtherDerived> &other) const;
// sparse * dense
template<typename OtherDerived>
const Product<Derived,OtherDerived>
operator*(const MatrixBase<OtherDerived> &other) const
{ return Product<Derived,OtherDerived>(derived(), other.derived()); }
// dense * sparse
template<typename OtherDerived> friend
const Product<OtherDerived,Derived>
operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
{ return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
}
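    // Editor's sketch (illustrative only): symmetric reordering prior to a factorization,
    // assuming a square sparse matrix A and a permutation perm of matching size:
    //   SparseMatrix<double> PAPinv = A.twistedBy(perm); // PAPinv = P * A * P^-1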
template<typename OtherDerived>
Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
template<int Mode>
inline const TriangularView<const Derived, Mode> triangularView() const;
template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SparseSelfAdjointView<Derived, UpLo> Type; };
template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView<const Derived, UpLo> Type; };
template<unsigned int UpLo> inline
typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
template<unsigned int UpLo> inline
typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
RealScalar norm() const;
RealScalar blueNorm() const;
TransposeReturnType transpose() { return TransposeReturnType(derived()); }
const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
// inner-vector
typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
InnerVectorReturnType innerVector(Index outer);
const ConstInnerVectorReturnType innerVector(Index outer) const;
// set of inner-vectors
typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
DenseMatrixType toDense() const
{
return DenseMatrixType(derived());
}
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other,prec); }
/** \returns the matrix or vector obtained by evaluating this expression.
*
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
*/
inline const typename internal::eval<Derived>::type eval() const
{ return typename internal::eval<Derived>::type(derived()); }
Scalar sum() const;
inline const SparseView<Derived>
pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;
protected:
bool m_isRValue;
static inline StorageIndex convert_index(const Index idx) {
return internal::convert_index<StorageIndex>(idx);
}
private:
template<typename Dest> void evalTo(Dest &) const;
};
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIXBASE_H
| 17,923 | 43.147783 | 262 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparsePermutation.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_PERMUTATION_H
#define EIGEN_SPARSE_PERMUTATION_H
// This file implements sparse * permutation products
namespace Eigen {
namespace internal {
template<typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
{
typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
typedef typename MatrixTypeCleaned::Scalar Scalar;
typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;
enum {
SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
};
typedef typename internal::conditional<MoveOuter,
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;
template<typename Dest,typename PermutationType>
static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
{
MatrixType mat(xpr);
if(MoveOuter)
{
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(mat.rows(), mat.cols());
Matrix<StorageIndex,Dynamic,1> sizes(mat.outerSize());
for(Index j=0; j<mat.outerSize(); ++j)
{
Index jp = perm.indices().coeff(j);
sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(mat.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
}
tmp.reserve(sizes);
for(Index j=0; j<mat.outerSize(); ++j)
{
Index jp = perm.indices().coeff(j);
Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
for(typename MatrixTypeCleaned::InnerIterator it(mat,jsrc); it; ++it)
tmp.insertByOuterInner(jdst,it.index()) = it.value();
}
dst = tmp;
}
else
{
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(mat.rows(), mat.cols());
Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm_cpy;
if((Side==OnTheLeft) ^ Transposed)
perm_cpy = perm;
else
perm_cpy = perm.transpose();
for(Index j=0; j<mat.outerSize(); ++j)
for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
sizes[perm_cpy.indices().coeff(it.index())]++;
tmp.reserve(sizes);
for(Index j=0; j<mat.outerSize(); ++j)
for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
tmp.insertByOuterInner(perm_cpy.indices().coeff(it.index()),j) = it.value();
dst = tmp;
}
}
};
}
namespace internal {
template <int ProductTag> struct product_promote_storage_type<Sparse, PermutationStorage, ProductTag> { typedef Sparse ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse, ProductTag> { typedef Sparse ret; };
// TODO, the following two overloads are only needed to define the right temporary type through
// typename traits<permutation_sparse_matrix_product<Rhs,Lhs,OnTheRight,false> >::ReturnType
// whereas it should be correctly handled by traits<Product<> >::PlainObject
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, PermutationShape, SparseShape>
: public evaluator<typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType>
{
typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
typedef typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
explicit product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, SparseShape, PermutationShape >
: public evaluator<typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType>
{
typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
typedef typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
explicit product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
} // end namespace internal
/** \returns the matrix with the permutation applied to the columns
*/
template<typename SparseDerived, typename PermDerived>
inline const Product<SparseDerived, PermDerived, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
{ return Product<SparseDerived, PermDerived, AliasFreeProduct>(matrix.derived(), perm.derived()); }
/** \returns the matrix with the permutation applied to the rows
*/
template<typename SparseDerived, typename PermDerived>
inline const Product<PermDerived, SparseDerived, AliasFreeProduct>
operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
{ return Product<PermDerived, SparseDerived, AliasFreeProduct>(perm.derived(), matrix.derived()); }
/** \returns the matrix with the inverse permutation applied to the columns.
*/
template<typename SparseDerived, typename PermutationType>
inline const Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const InverseImpl<PermutationType, PermutationStorage>& tperm)
{
return Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>(matrix.derived(), tperm.derived());
}
/** \returns the matrix with the inverse permutation applied to the rows.
*/
template<typename SparseDerived, typename PermutationType>
inline const Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>
operator*(const InverseImpl<PermutationType,PermutationStorage>& tperm, const SparseMatrixBase<SparseDerived>& matrix)
{
return Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived());
}
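// Usage sketch (illustrative, not part of Eigen's sources; assumes a compatible
// Eigen::SparseMatrix<double> A and Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic> P):
//   Eigen::SparseMatrix<double> B = P * A;            // permute the rows of A
//   Eigen::SparseMatrix<double> C = A * P;            // permute the columns of A
//   Eigen::SparseMatrix<double> D = P.inverse() * A;  // undo the row permutation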
} // end namespace Eigen
#endif // EIGEN_SPARSE_PERMUTATION_H
| 7,329 | 39.949721 | 146 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseProduct.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEPRODUCT_H
#define EIGEN_SPARSEPRODUCT_H
namespace Eigen {
/** \returns an expression of the product of two sparse matrices.
* By default a conservative product preserving the symbolic non zeros is performed.
  * The automatic pruning of small values can be achieved by calling the pruned() function
  * in which case a totally different product algorithm is employed:
  * \code
  * C = (A*B).pruned();             // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
* where \c ref is a meaningful non zero reference value.
* */
template<typename Derived>
template<typename OtherDerived>
inline const Product<Derived,OtherDerived,AliasFreeProduct>
SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
{
return Product<Derived,OtherDerived,AliasFreeProduct>(derived(), other.derived());
}
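// Usage sketch (illustrative, not part of Eigen's sources; A and B are assumed to be
// Eigen::SparseMatrix<double> objects of compatible sizes):
//   Eigen::SparseMatrix<double> C = A * B;                   // conservative product
//   Eigen::SparseMatrix<double> D = (A * B).pruned();        // suppress exact numerical zeros
//   Eigen::SparseMatrix<double> E = (A * B).pruned(1, 1e-9); // drop coefficients below 1*1e-9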
namespace internal {
// sparse * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
evalTo(dst, lhs, rhs, typename evaluator_traits<Dest>::Shape());
}
// dense += sparse * sparse
template<typename Dest,typename ActualLhs>
static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
}
// dense -= sparse * sparse
template<typename Dest>
static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
addTo(dst, -lhs, rhs);
}
protected:
// sparse = sparse * sparse
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape)
{
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
}
// dense = sparse * sparse
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape)
{
dst.setZero();
addTo(dst, lhs, rhs);
}
};
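// Usage sketch (illustrative, not part of Eigen's sources): with a dense destination,
// the paths above are taken instead of the sparse-to-sparse one, e.g. for sparse A, B:
//   Eigen::MatrixXd D = A * B;  // dense = sparse * sparse: setZero() followed by addTo()
//   D += A * B;                 // dense += sparse * sparse: addTo()
//   D -= A * B;                 // dense -= sparse * sparse: subTo(), i.e. addTo(dst, -lhs, rhs)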
// sparse * sparse-triangular
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType>
: public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};
// sparse-triangular * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
: public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};
// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
}
};
// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
}
};
// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
}
};
template<typename Lhs, typename Rhs, int Options>
struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
{
typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
explicit unary_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
using std::abs;
::new (static_cast<Base*>(this)) Base(m_result);
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(xpr.nestedExpression().lhs());
RhsNested rhsNested(xpr.nestedExpression().rhs());
internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
abs(xpr.reference())*xpr.epsilon());
}
protected:
PlainObject m_result;
};
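// Usage sketch (illustrative, not part of Eigen's sources): the evaluator above is the
// entry point of pruned products such as
//   C = (A * B).pruned(ref, eps);
// where coefficients are kept against the threshold abs(ref)*eps forwarded to
// sparse_sparse_product_with_pruning_selector::run().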
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
| 7,049 | 40.470588 | 183 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseRedux.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEREDUX_H
#define EIGEN_SPARSEREDUX_H
namespace Eigen {
template<typename Derived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
Scalar res(0);
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
res += iter.value();
return res;
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
if(this->isCompressed())
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
else
return Base::sum();
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
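// Usage sketch (illustrative, not part of Eigen's sources; A is assumed to be an
// initialized, non-empty Eigen::SparseMatrix<double>):
//   double total = A.sum();  // adds up all stored coefficients
// For a compressed matrix the specialization above reduces to a single dense
// Map(...).sum() over the value buffer.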
} // end namespace Eigen
#endif // EIGEN_SPARSEREDUX_H
| 1,699 | 33 | 93 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseRef.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_REF_H
#define EIGEN_SPARSE_REF_H
namespace Eigen {
enum {
StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */
};
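// Usage sketch (illustrative, not part of Eigen's sources):
//   Eigen::SparseMatrix<double> A(10,10);
//   A.makeCompressed();
//   Eigen::Ref<Eigen::SparseMatrix<double>, Eigen::StandardCompressedFormat> r(A);
// Requesting StandardCompressedFormat makes the Ref assert that A.isCompressed() holds.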
namespace internal {
template<typename Derived> class SparseRefBase;
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
enum {
Options = _Options,
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
};
template<typename Derived> struct match {
enum {
StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
enum {
Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<SparseVector<MatScalar,MatOptions,MatIndex> >
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
enum {
Options = _Options,
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
};
template<typename Derived> struct match {
enum {
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
enum {
Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
};
};
template<typename Derived>
struct traits<SparseRefBase<Derived> > : public traits<Derived> {};
template<typename Derived> class SparseRefBase
: public SparseMapBase<Derived>
{
public:
typedef SparseMapBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)
SparseRefBase()
: Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)
{}
protected:
template<typename Expression>
void construct(Expression& expr)
{
if(expr.outerIndexPtr()==0)
::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
else
::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
}
};
} // namespace internal
/**
* \ingroup SparseCore_Module
*
* \brief A sparse matrix expression referencing an existing sparse expression
*
* \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
  * \tparam Options specifies whether a standard compressed format is required; \c Options can be
  *         \c #StandardCompressedFormat or \c 0. The default is \c 0.
*
* \sa class Ref
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseMatrixType, int Options>
class Ref<SparseMatrixType, Options>
: public SparseMapBase<Derived,WriteAccessors> // yes, that's weird to use Derived here, but that works!
#endif
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
template<int OtherOptions>
inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
template<int OtherOptions>
inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<int OtherOptions>
inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.derived());
}
template<int OtherOptions>
inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
/** Implicit constructor from any sparse expression (2D matrix or 1D vector) */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.const_cast_derived());
}
};
// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;
typedef internal::traits<Ref> Traits;
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
{
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
~Ref() {
if(m_hasCopy) {
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
obj->~TPlainObjectType();
}
}
protected:
template<typename Expression>
void construct(const Expression& expr,internal::true_type)
{
if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
else
{
Base::construct(expr);
}
}
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
char m_object_bytes[sizeof(TPlainObjectType)];
bool m_hasCopy;
};
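// Usage sketch (illustrative, not part of Eigen's sources): the const Ref above lets one
// write non-template functions accepting any compatible sparse expression:
//   double firstValue(const Eigen::Ref<const Eigen::SparseMatrix<double> >& m) {
//     return m.nonZeros() > 0 ? m.valuePtr()[0] : 0.0;
//   }
// An argument whose storage does not match (e.g. a row-major matrix, or a non-compressed
// one when StandardCompressedFormat is requested) is evaluated into the internal copy above.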
/**
* \ingroup SparseCore_Module
*
* \brief A sparse vector expression referencing an existing sparse vector expression
*
* \tparam SparseVectorType the equivalent sparse vector type of the referenced data, it must be a template instance of class SparseVector.
*
* \sa class Ref
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseVectorType>
class Ref<SparseVectorType>
: public SparseMapBase<Derived,WriteAccessors>
#endif
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
template<int OtherOptions>
inline Ref(const SparseVector<MatScalar,OtherOptions,MatIndex>& expr);
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<int OtherOptions>
inline Ref(SparseVector<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseVector<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
/** Implicit constructor from any 1D sparse vector expression */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.const_cast_derived());
}
};
// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public internal::SparseRefBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> TPlainObjectType;
typedef internal::traits<Ref> Traits;
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
{
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
~Ref() {
if(m_hasCopy) {
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
obj->~TPlainObjectType();
}
}
protected:
template<typename Expression>
void construct(const Expression& expr,internal::true_type)
{
Base::construct(expr);
}
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
char m_object_bytes[sizeof(TPlainObjectType)];
bool m_hasCopy;
};
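// Usage sketch (illustrative, not part of Eigen's sources): a writable Ref binds directly
// to the buffers of an existing sparse vector, without any copy:
//   void scaleValues(Eigen::Ref<Eigen::SparseVector<double> > v);  // hypothetical helper
//   Eigen::SparseVector<double> x(100);
//   scaleValues(x);  // operates in place on x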
namespace internal {
// FIXME shall we introduce a general evaluator_ref that we can specialize for any sparse object once, and thus remove this copy-paste boilerplate...
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_REF_H
| 15,492 | 37.927136 | 172 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseSelfAdjointView.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H
namespace Eigen {
/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
* \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
*
  * \param MatrixType the type of the sparse matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
* and most of the time this is the only way that it is used.
*
* \sa SparseMatrixBase::selfadjointView()
*/
namespace internal {
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
}
template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
: public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:
enum {
Mode = _Mode,
TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
};
typedef EigenBase<SparseSelfAdjointView> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
/** \internal \returns a reference to the nested matrix */
const _MatrixTypeNested& matrix() const { return m_matrix; }
typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
/** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
*
    * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived>
Product<SparseSelfAdjointView, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
}
/** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
*
    * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived> friend
Product<OtherDerived, SparseSelfAdjointView>
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
template<typename OtherDerived>
Product<SparseSelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
}
/** Efficient dense vector/matrix times sparse self-adjoint matrix product */
template<typename OtherDerived> friend
Product<OtherDerived,SparseSelfAdjointView>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
*
* \returns a reference to \c *this
*
* To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
* call this function with u.adjoint().
*/
template<typename DerivedU>
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
/** \returns an expression of P H P^-1 */
// TODO implement twists in a more evaluator friendly fashion
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
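    // Usage sketch (illustrative, not part of Eigen's sources): computing the symmetric
    // permutation P A P^-1 of a selfadjoint matrix stored in its lower triangle:
    //   Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,StorageIndex> P = ...;
    //   Eigen::SparseMatrix<double> PAPinv;
    //   PAPinv = A.selfadjointView<Eigen::Lower>().twistedBy(P);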
template<typename SrcMatrixType,int SrcMode>
SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
{
internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
return *this;
}
SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
{
PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
template<typename SrcMatrixType,unsigned int SrcMode>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
void resize(Index rows, Index cols)
{
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows == this->rows() && cols == this->cols()
&& "SparseSelfadjointView::resize() does not actually allow to resize.");
}
protected:
MatrixTypeNested m_matrix;
//mutable VectorI m_countPerRow;
//mutable VectorI m_countPerCol;
private:
template<typename Dest> void evalTo(Dest &) const;
};
/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
return SparseSelfAdjointView<const Derived, UpLo>(derived());
}
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
return SparseSelfAdjointView<Derived, UpLo>(derived());
}
/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/
template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
m_matrix = tmp.template triangularView<Mode>();
else
m_matrix += alpha * tmp.template triangularView<Mode>();
return *this;
}
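// Usage sketch (illustrative, not part of Eigen's sources; U and V are assumed to be
// sparse matrices with as many rows as H):
//   Eigen::SparseMatrix<double> H(n,n);
//   H.selfadjointView<Eigen::Lower>().rankUpdate(U);        // H += U * U^*
//   H.selfadjointView<Eigen::Lower>().rankUpdate(V, -1.0);  // H -= V * V^*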
namespace internal {
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SparseSelfAdjointShape Shape;
};
struct SparseSelfAdjoint2Sparse {};
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
}
  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to:
template<typename DestScalar,int StorageOrder,typename AssignFunc>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
call_assignment_no_alias_no_transpose(dst, tmp, func);
}
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
dst += tmp;
}
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
dst -= tmp;
}
template<typename DestScalar>
static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
// TODO directly evaluate into dst;
SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
dst = tmp;
}
};
} // end namespace internal
/***************************************************************************
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/
namespace internal {
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
EIGEN_ONLY_USED_FOR_DEBUG(alpha);
typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
typedef typename LhsEval::InnerIterator LhsIterator;
typedef typename SparseLhsType::Scalar LhsScalar;
enum {
LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
((Mode&(Upper|Lower))==(Upper|Lower))
|| ( (Mode&Upper) && !LhsIsRowMajor)
|| ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
SparseLhsTypeNested lhs_nested(lhs);
LhsEval lhsEval(lhs_nested);
  // work on one column at a time
for (Index k=0; k<rhs.cols(); ++k)
{
for (Index j=0; j<lhs.outerSize(); ++j)
{
LhsIterator i(lhsEval,j);
// handle diagonal coeff
if (ProcessSecondHalf)
{
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
res(j,k) += alpha * i.value() * rhs(j,k);
++i;
}
}
// premultiplied rhs for scatters
typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
// accumulator for partial scalar product
typename DenseResType::Scalar res_j(0);
for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
{
LhsScalar lhs_ij = i.value();
if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
res_j += lhs_ij * rhs(i.index(),k);
res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
}
res(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j))
res(j,k) += alpha * i.value() * rhs(j,k);
}
}
}
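// Usage sketch (illustrative, not part of Eigen's sources): the routine above backs
// products such as
//   Eigen::VectorXd y = A.selfadjointView<Eigen::Lower>() * x;
// only the stored triangle of A is traversed; the missing half is reconstructed on
// the fly through the conjugated scatter in the inner loop.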
template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
{
typedef typename LhsView::_MatrixTypeNested Lhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs);
internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
}
};
template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
{
typedef typename RhsView::_MatrixTypeNested Rhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix());
// transpose everything
Transpose<Dest> dstT(dst);
internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
: public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
typedef Product<LhsView, Rhs, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
product_evaluator(const XprType& xpr)
: m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
}
protected:
typename Rhs::PlainObject m_lhs;
PlainObject m_result;
};
template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
: public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
typedef Product<Lhs, RhsView, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
product_evaluator(const XprType& xpr)
: m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
}
protected:
typename Lhs::PlainObject m_rhs;
PlainObject m_result;
};
} // namespace internal
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef evaluator<MatrixType> MatEval;
typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
MatEval matEval(mat);
Dest& dest(_dest.derived());
enum {
StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
};
Index size = mat.rows();
VectorI count;
count.resize(size);
count.setZero();
dest.resize(size,size);
for(Index j = 0; j<size; ++j)
{
Index jp = perm ? perm[j] : j;
for(MatIterator it(matEval,j); it; ++it)
{
Index i = it.index();
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
if(Mode==(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
{
count[ip]++;
count[jp]++;
}
}
}
Index nnz = count.sum();
// reserve space
dest.resizeNonZeros(nnz);
dest.outerIndexPtr()[0] = 0;
for(Index j=0; j<size; ++j)
dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];
// copy data
for(StorageIndex j = 0; j<size; ++j)
{
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
Index c = it.col();
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;
if(Mode==(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
dest.valuePtr()[k] = it.value();
}
else if(r==c)
{
Index k = count[ip]++;
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
}
else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
{
if(!StorageOrderMatch)
std::swap(ip,jp);
Index k = count[jp]++;
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
k = count[ip]++;
dest.innerIndexPtr()[k] = jp;
dest.valuePtr()[k] = numext::conj(it.value());
}
}
}
}
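// Usage sketch (illustrative, not part of Eigen's sources): this routine is what runs
// behind an assignment from a selfadjoint view to a plain sparse matrix, e.g.
//   Eigen::SparseMatrix<double> full;
//   full = A.selfadjointView<Eigen::Lower>();  // rebuild both triangles from the lower one
// optionally applying the permutation perm on the fly.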
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef evaluator<MatrixType> MatEval;
typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
MatEval matEval(mat);
Index size = mat.rows();
VectorI count(size);
count.setZero();
dest.resize(size,size);
for(StorageIndex j = 0; j<size; ++j)
{
StorageIndex jp = perm ? perm[j] : j;
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
StorageIndex ip = perm ? perm[i] : i;
count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest.outerIndexPtr()[0] = 0;
for(Index j=0; j<size; ++j)
dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
dest.resizeNonZeros(dest.outerIndexPtr()[size]);
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];
for(StorageIndex j = 0; j<size; ++j)
{
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm? perm[i] : i;
Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
if(!StorageOrderMatch) std::swap(ip,jp);
if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
}
}
}
}
// TODO implement twists in a more evaluator friendly fashion
namespace internal {
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};
}
template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
: public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
enum {
RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
};
protected:
typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
public:
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
: m_matrix(mat), m_perm(perm)
{}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
const NestedExpression& matrix() const { return m_matrix; }
const Perm& perm() const { return m_perm; }
protected:
MatrixTypeNested m_matrix;
const Perm& m_perm;
};
namespace internal {
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
typedef typename DstXprType::StorageIndex DstIndex;
template<int Options>
static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
dst = tmp;
}
template<typename DestType,unsigned int DestMode>
static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
| 25,670 | 38.073059 | 198 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseSolverBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESOLVERBASE_H
#define EIGEN_SPARSESOLVERBASE_H
namespace Eigen {
namespace internal {
/** \internal
* Helper functions to solve with a sparse right-hand-side and result.
* The rhs is decomposed into small vertical panels which are solved through dense temporaries.
*/
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Dest::Scalar DestScalar;
  // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored in a dense matrix.
static const Index NbColsAtOnce = 4;
Index rhsCols = rhs.cols();
Index size = rhs.rows();
// the temporary matrices do not need more columns than NbColsAtOnce:
Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
{
Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
}
}
// Overload for vector as rhs
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
typedef typename Dest::Scalar DestScalar;
Index size = rhs.rows();
Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);
Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);
dest_dense = dec.solve(rhs_dense);
dest = dest_dense.sparseView();
}
} // end namespace internal
/** \class SparseSolverBase
* \ingroup SparseCore_Module
* \brief A base class for sparse solvers
*
* \tparam Derived the actual type of the solver.
*
*/
template<typename Derived>
class SparseSolverBase : internal::noncopyable
{
public:
/** Default constructor */
SparseSolverBase()
: m_isInitialized(false)
{}
~SparseSolverBase()
{}
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const SparseMatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
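    // Usage sketch (illustrative, not part of Eigen's sources; SimplicialLDLT is one
    // concrete solver deriving from SparseSolverBase):
    //   Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver(A);
    //   Eigen::VectorXd x = solver.solve(b);              // dense right-hand side
    //   Eigen::SparseMatrix<double> X = solver.solve(B);  // sparse rhs, handled per dense panels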
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal default implementation of solving with a sparse rhs */
template<typename Rhs,typename Dest>
void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
{
internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
mutable bool m_isInitialized;
};
} // end namespace Eigen
#endif // EIGEN_SPARSESOLVERBASE_H
| 4,424 | 34.4 | 116 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
namespace Eigen {
namespace internal {
// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
//Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
AmbiVector<Scalar,StorageIndex> tempVector(rows);
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
res.resize(cols, rows);
else
res.resize(rows, cols);
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differ on average by one non zero, thus the number of non zeros for
  // the product of a rhs column with the lhs is X+Y where X is the average number of non zeros
  // per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
for (Index j=0; j<cols; ++j)
{
// FIXME:
//double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
// let's do a more accurate determination of the nnz ratio for the current column j of res
tempVector.init(ratioColRes);
tempVector.setZero();
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
Scalar x = rhsIt.value();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
}
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
struct sparse_sparse_product_with_pruning_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// let's transpose the product to get a column x column product
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
// SparseTemporaryType _res(res.cols(), res.rows());
// sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
// res = _res.transpose();
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
RowMajorMatrixLhs rowLhs(lhs);
    sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>::run(rowLhs,rhs,res,tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
RowMajorMatrixRhs rowRhs(rhs);
    sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>::run(lhs,rowRhs,res,tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
ColMajorMatrixLhs colLhs(lhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
| 8,741 | 42.929648 | 150 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseTranspose.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSETRANSPOSE_H
#define EIGEN_SPARSETRANSPOSE_H
namespace Eigen {
namespace internal {
template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
class SparseTransposeImpl
: public SparseMatrixBase<Transpose<MatrixType> >
{};
template<typename MatrixType>
class SparseTransposeImpl<MatrixType,CompressedAccessBit>
: public SparseCompressedBase<Transpose<MatrixType> >
{
typedef SparseCompressedBase<Transpose<MatrixType> > Base;
public:
using Base::derived;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }
inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
};
}
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
: public internal::SparseTransposeImpl<MatrixType>
{
protected:
typedef internal::SparseTransposeImpl<MatrixType> Base;
};
namespace internal {
template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IteratorBased>
: public evaluator_base<Transpose<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef Transpose<ArgType> XprType;
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: EvalIterator(unaryOp.m_argImpl,outer)
{}
Index row() const { return EvalIterator::col(); }
Index col() const { return EvalIterator::row(); }
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}
protected:
evaluator<ArgType> m_argImpl;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSETRANSPOSE_H
| 3,175 | 33.150538 | 113 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseTriangularView.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \brief Base class for a triangular part in a \b sparse matrix
*
* This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
* It extends class TriangularView with additional methods which are available for sparse expressions only.
*
* \sa class TriangularView, SparseMatrixBase::triangularView()
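  *
  * A minimal usage sketch (assuming a square sparse matrix \c A with a non-zero
  * diagonal and a dense right-hand side \c b, both hypothetical):
  * \code
  * SparseMatrix<double> A;  // filled elsewhere
  * VectorXd b;              // filled elsewhere
  * A.triangularView<Lower>().solveInPlace(b);  // b is overwritten by the solution
  * \endcode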
*/
template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
: public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
typedef TriangularView<MatrixType,Mode> TriangularViewType;
protected:
// dummy solve function to make TriangularView happy.
void solve() const;
typedef SparseMatrixBase<TriangularViewType> Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
dst = rhs;
this->solveInPlace(dst);
}
/** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
/** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
};
namespace internal {
template<typename ArgType, unsigned int Mode>
struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
: evaluator_base<TriangularView<ArgType,Mode> >
{
typedef TriangularView<ArgType,Mode> XprType;
protected:
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
|| ((Mode&Upper) && (ArgType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
public:
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
typedef EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
: Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
{
if(SkipFirst)
{
while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
Base::operator++();
if(HasUnitDiag)
m_returnOne = m_containsDiag;
}
else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if(HasUnitDiag && m_returnOne)
m_returnOne = false;
else
{
Base::operator++();
if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
return *this;
}
EIGEN_STRONG_INLINE operator bool() const
{
if(HasUnitDiag && m_returnOne)
return true;
if(SkipFirst) return Base::operator bool();
else
{
if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
else return (Base::operator bool() && this->index() <= this->outer());
}
}
// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline StorageIndex index() const
{
if(HasUnitDiag && m_returnOne) return internal::convert_index<StorageIndex>(Base::outer());
else return Base::index();
}
inline Scalar value() const
{
if(HasUnitDiag && m_returnOne) return Scalar(1);
else return Base::value();
}
protected:
bool m_returnOne;
bool m_containsDiag;
private:
Scalar& valueRef();
};
protected:
evaluator<ArgType> m_argImpl;
const ArgType& m_arg;
};
} // end namespace internal
template<typename Derived>
template<int Mode>
inline const TriangularView<const Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
return TriangularView<const Derived, Mode>(derived());
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
| 6,435 | 32.873684 | 128 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseUtil.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEUTIL_H
#define EIGEN_SPARSEUTIL_H
namespace Eigen {
#ifdef NDEBUG
#define EIGEN_DBG_SPARSE(X)
#else
#define EIGEN_DBG_SPARSE(X) X
#endif
#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
{ \
return Base::operator Op(other.derived()); \
} \
EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
{ \
return Base::operator Op(other); \
}
#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename Other> \
EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
{ \
return Base::operator Op(scalar); \
}
#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =)
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)
const int CoherentAccessPattern = 0x1;
const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
template<typename MatrixType> class SparseView;
template<typename Lhs, typename Rhs> class SparseSparseProduct;
template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
namespace internal {
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval;
template<typename T> struct eval<T,Sparse>
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime,traits<T>::Flags>
{};
template<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
};
template<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
};
// TODO this seems almost identical to plain_matrix_type<T, Sparse>
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};
template<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
public:
typedef Matrix<_Scalar, 1, 1> type;
};
template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};
template<typename T>
struct plain_object_eval<T,Sparse>
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime, evaluator<T>::Flags>
{};
template<typename Decomposition, typename RhsType>
struct solve_traits<Decomposition,RhsType,Sparse>
{
typedef typename sparse_eval<RhsType, RhsType::RowsAtCompileTime, RhsType::ColsAtCompileTime,traits<RhsType>::Flags>::type PlainObject;
};
template<typename Derived>
struct generic_xpr_base<Derived, MatrixXpr, Sparse>
{
typedef SparseMatrixBase<Derived> type;
};
struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } };
struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
} // end namespace internal
/** \ingroup SparseCore_Module
*
* \class Triplet
*
  * \brief A small structure to hold a non-zero entry as a triplet (i,j,value).
*
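  * A minimal usage sketch (sizes and values are illustrative):
  * \code
  * std::vector<Triplet<double> > triplets;
  * triplets.push_back(Triplet<double>(0, 0, 1.0));
  * triplets.push_back(Triplet<double>(1, 2, -2.0));
  * SparseMatrix<double> A(3, 3);
  * A.setFromTriplets(triplets.begin(), triplets.end());
  * \endcode
  *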
* \sa SparseMatrix::setFromTriplets()
*/
template<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >
class Triplet
{
public:
Triplet() : m_row(0), m_col(0), m_value(0) {}
Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))
: m_row(i), m_col(j), m_value(v)
{}
/** \returns the row index of the element */
const StorageIndex& row() const { return m_row; }
/** \returns the column index of the element */
const StorageIndex& col() const { return m_col; }
/** \returns the value of the element */
const Scalar& value() const { return m_value; }
protected:
StorageIndex m_row, m_col;
Scalar m_value;
};
} // end namespace Eigen
#endif // EIGEN_SPARSEUTIL_H
| 6,602 | 35.888268 | 173 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H
namespace Eigen {
/** \ingroup SparseCore_Module
* \class SparseVector
*
* \brief a sparse vector class
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
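  *
  * A minimal usage sketch (size and values are illustrative):
  * \code
  * SparseVector<double> v(1000);  // all-zero vector of size 1000
  * v.coeffRef(3) = 1.5;           // inserts the entry if it does not exist yet
  * v.insert(42) = -2.0;           // assumes entry 42 does not already exist
  * double s = v.sum();            // sum of the non-zero entries
  * \endcode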
*/
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
IsColVector = (_Options & RowMajorBit) ? 0 : 1,
RowsAtCompileTime = IsColVector ? Dynamic : 1,
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
// Sparse-Vector-Assignment kinds:
enum {
SVA_RuntimeSwitch,
SVA_Inner,
SVA_Outer
};
template< typename Dest, typename Src,
int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
: Src::InnerSizeAtCompileTime==1 ? SVA_Outer
: SVA_Inner>
struct sparse_vector_assign_selector;
}
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
: public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef SparseCompressedBase<SparseVector> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };
enum {
Options = _Options
};
EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return 0; }
inline StorageIndex* outerIndexPtr() { return 0; }
inline const StorageIndex* innerNonZeroPtr() const { return 0; }
inline StorageIndex* innerNonZeroPtr() { return 0; }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
inline const Storage& data() const { return m_data; }
inline Scalar coeff(Index row, Index col) const
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeff(IsColVector ? row : col);
}
inline Scalar coeff(Index i) const
{
eigen_assert(i>=0 && i<m_size);
return m_data.at(StorageIndex(i));
}
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeffRef(IsColVector ? row : col);
}
  /** \returns a reference to the coefficient value at given index \a i.
    * This operation involves a log(rho*size) binary search. If the coefficient does not
    * exist yet, then a sorted insertion into a sequential buffer is performed.
*
* This insertion might be very costly if the number of nonzeros above \a i is large.
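    *
    * A short sketch (index and value are illustrative):
    * \code
    * SparseVector<double> v(10);
    * v.coeffRef(5) += 1.0;  // creates the entry at index 5 first if needed
    * \endcode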
*/
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(StorageIndex(i));
}
public:
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
inline Index nonZeros() const { return m_data.size(); }
inline void startVec(Index outer)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
}
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBack(inner);
}
inline Scalar& insertBack(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBackUnordered(inner);
}
inline Scalar& insertBackUnordered(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
inline Scalar& insert(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
Index inner = IsColVector ? row : col;
Index outer = IsColVector ? col : row;
EIGEN_ONLY_USED_FOR_DEBUG(outer);
eigen_assert(outer==0);
return insert(inner);
}
Scalar& insert(Index i)
{
eigen_assert(i>=0 && i<m_size);
Index startId = 0;
Index p = Index(m_data.size()) - 1;
// TODO smart realloc
m_data.resize(p+2,1);
while ( (p >= startId) && (m_data.index(p) > i) )
{
m_data.index(p+1) = m_data.index(p);
m_data.value(p+1) = m_data.value(p);
--p;
}
m_data.index(p+1) = convert_index(i);
m_data.value(p+1) = 0;
return m_data.value(p+1);
}
  /** Preallocates \a reserveSize non-zero entries. */
inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
inline void finalize() {}
/** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
/** Resizes the sparse vector to \a rows x \a cols
*
* This method is provided for compatibility with matrices.
* For a column vector, \a cols must be equal to 1.
* For a row vector, \a rows must be equal to 1.
*
* \sa resize(Index)
*/
void resize(Index rows, Index cols)
{
eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}
/** Resizes the sparse vector to \a newSize
    * This method deletes all entries, thus leaving an empty sparse vector.
*
* \sa conservativeResize(), setZero() */
void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
/** Resizes the sparse vector to \a newSize, while leaving old values untouched.
*
    * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
* Call .data().squeeze() to free extra memory.
*
* \sa reserve(), setZero()
*/
void conservativeResize(Index newSize)
{
if (newSize < m_size)
{
Index i = 0;
while (i<m_data.size() && m_data.index(i)<newSize) ++i;
m_data.resize(i);
}
m_size = newSize;
}
void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
check_template_parameters();
*this = other.derived();
}
inline SparseVector(const SparseVector& other)
: Base(other), m_size(0)
{
check_template_parameters();
*this = other.derived();
}
/** Swaps the values of \c *this and \a other.
    * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
{
std::swap(m_size, other.m_size);
m_data.swap(other.m_data);
}
template<int OtherOptions>
inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
{
eigen_assert(other.outerSize()==1);
std::swap(m_size, other.m_innerSize);
m_data.swap(other.m_data);
}
inline SparseVector& operator=(const SparseVector& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else
{
resize(other.size());
m_data = other.m_data;
}
return *this;
}
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
SparseVector tmp(other.size());
internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
this->swap(tmp);
return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Lhs, typename Rhs>
inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
{
return Base::operator=(product);
}
#endif
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
{
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
s << std::endl;
return s;
}
/** Destructor */
inline ~SparseVector() {}
/** Overloaded for performance */
Scalar sum() const;
public:
/** \internal \deprecated use setZero() and reserve() */
EIGEN_DEPRECATED void startFill(Index reserve)
{
setZero();
m_data.reserve(reserve);
}
/** \internal \deprecated use insertBack(Index,Index) */
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fill(IsColVector ? r : c);
}
/** \internal \deprecated use insertBack(Index) */
EIGEN_DEPRECATED Scalar& fill(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
/** \internal \deprecated use insert(Index,Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fillrand(IsColVector ? r : c);
}
/** \internal \deprecated use insert(Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index i)
{
return insert(i);
}
/** \internal \deprecated use finalize() */
EIGEN_DEPRECATED void endFill() {}
    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
/** \internal \deprecated use data() */
EIGEN_DEPRECATED Storage& _data() { return m_data; }
/** \internal \deprecated use data() */
EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
# ifdef EIGEN_SPARSEVECTOR_PLUGIN
# include EIGEN_SPARSEVECTOR_PLUGIN
# endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
Storage m_data;
Index m_size;
};
namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
: evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
typedef evaluator_base<SparseVectorType> Base;
typedef typename SparseVectorType::InnerIterator InnerIterator;
typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;
enum {
CoeffReadCost = NumTraits<_Scalar>::ReadCost,
Flags = SparseVectorType::Flags
};
evaluator() : Base() {}
explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_matrix->nonZeros();
}
operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
operator const SparseVectorType&() const { return *m_matrix; }
const SparseVectorType *m_matrix;
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.innerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
dst.insert(it.index()) = it.value();
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.outerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(Index i=0; i<src.size(); ++i)
{
typename SrcEvaluatorType::InnerIterator it(srcEval, i);
if(it)
dst.insert(i) = it.value();
}
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
static void run(Dest& dst, const Src& src) {
if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSEVECTOR_H
| 14,831 | 29.964509 | 120 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseView.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Daniel Lowengrub <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVIEW_H
#define EIGEN_SPARSEVIEW_H
namespace Eigen {
namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
};
};
} // end namespace internal
/** \ingroup SparseCore_Module
* \class SparseView
*
* \brief Expression of a dense or sparse matrix with zero or too small values removed
*
* \tparam MatrixType the type of the object of which we are removing the small entries
*
  * This class represents an expression of a given dense or sparse matrix from which
  * entries smaller than \c reference * \c epsilon have been removed.
* It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::sparseView(), SparseMatrixBase::pruned()
*/
template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
typedef SparseMatrixBase<SparseView > Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
/** \returns the nested expression */
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
Scalar reference() const { return m_reference; }
RealScalar epsilon() const { return m_epsilon; }
protected:
MatrixTypeNested m_matrix;
Scalar m_reference;
RealScalar m_epsilon;
};
namespace internal {
// TODO find a way to unify the two following variants
// This is tricky: implementing an inner iterator on top of an IndexBased evaluator is
// not easy because the evaluators do not expose the sizes of the underlying expression.
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IteratorBased>
: public evaluator_base<SparseView<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef SparseView<ArgType> XprType;
class InnerIterator : public EvalIterator
{
typedef typename XprType::Scalar Scalar;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
EvalIterator::operator++();
incrementToNonZero();
return *this;
}
using EvalIterator::value;
protected:
const XprType &m_view;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
{
EvalIterator::operator++();
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IndexBased>
: public evaluator_base<SparseView<ArgType> >
{
public:
typedef SparseView<ArgType> XprType;
protected:
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
m_inner++;
incrementToNonZero();
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{
return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
: m_sve.m_argImpl.coeff(m_inner, m_outer);
}
EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const unary_evaluator &m_sve;
Index m_inner;
const Index m_outer;
const Index m_end;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
{
m_inner++;
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
} // end namespace internal
/** \ingroup SparseCore_Module
*
* \returns a sparse expression of the dense expression \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S:
* \code
* MatrixXd D(n,m);
* SparseMatrix<double> S;
* S = D.sparseView(); // suppress numerical zeros (exact)
* S = D.sparseView(reference);
* S = D.sparseView(reference,epsilon);
* \endcode
  * where \a reference is a meaningful non-zero reference value,
* and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().
*
* \sa SparseMatrixBase::pruned(), class SparseView */
template<typename Derived>
const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
const typename NumTraits<Scalar>::Real& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
/** \returns an expression of \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used in conjunction with the product of two sparse matrices
* to automatically prune the smallest values as follows:
* \code
* C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
  * where \c ref is a meaningful non-zero reference value.
* */
template<typename Derived>
const SparseView<Derived>
SparseMatrixBase<Derived>::pruned(const Scalar& reference,
const RealScalar& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
} // end namespace Eigen
#endif
| 8,110 | 30.933071 | 123 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/TriangularSolver.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
#define EIGEN_SPARSETRIANGULARSOLVER_H
namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, int Mode,
int UpLo = (Mode & Lower)
? Lower
: (Mode & Upper)
? Upper
: -1,
int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
struct sparse_solve_triangular_selector;
// forward substitution, row-major
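// For a lower-triangular Lhs, each entry of the solution follows the recurrence
//   x_i = (b_i - sum_{j<i} L(i,j)*x_j) / L(i,i),
// evaluated row by row below; the division by L(i,i) is skipped in the UnitDiag case.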
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=0; i<lhs.rows(); ++i)
{
Scalar tmp = other.coeff(i,col);
Scalar lastVal(0);
Index lastIndex = 0;
for(LhsIterator it(lhsEval, i); it; ++it)
{
lastVal = it.value();
lastIndex = it.index();
if(lastIndex==i)
break;
tmp -= lastVal * other.coeff(lastIndex,col);
}
if (Mode & UnitDiag)
other.coeffRef(i,col) = tmp;
else
{
eigen_assert(lastIndex==i);
other.coeffRef(i,col) = tmp/lastVal;
}
}
}
}
};
// backward substitution, row-major
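// Same recurrence as above, traversed from the last row upwards:
//   x_i = (b_i - sum_{j>i} U(i,j)*x_j) / U(i,i)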
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=lhs.rows()-1 ; i>=0 ; --i)
{
Scalar tmp = other.coeff(i,col);
Scalar l_ii(0);
LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
{
eigen_assert(it && it.index()==i);
l_ii = it.value();
++it;
}
else if (it && it.index() == i)
++it;
for(; it; ++it)
{
tmp -= it.value() * other.coeff(it.index(),col);
}
if (Mode & UnitDiag) other.coeffRef(i,col) = tmp;
else other.coeffRef(i,col) = tmp/l_ii;
}
}
}
};
// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=0; i<lhs.cols(); ++i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
{
LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
{
eigen_assert(it && it.index()==i);
tmp /= it.value();
}
if (it && it.index()==i)
++it;
for(; it; ++it)
other.coeffRef(it.index(), col) -= tmp * it.value();
}
}
}
}
};
// backward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=lhs.cols()-1; i>=0; --i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
{
if(!(Mode & UnitDiag))
{
// TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
LhsIterator it(lhsEval, i);
while(it && it.index()!=i)
++it;
eigen_assert(it && it.index()==i);
other.coeffRef(i,col) /= it.value();
}
LhsIterator it(lhsEval, i);
for(; it && it.index()<i; ++it)
other.coeffRef(it.index(), col) -= tmp * it.value();
}
}
}
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const
{
eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
typedef typename internal::conditional<copy,
typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
OtherCopy otherCopy(other.derived());
internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
}
#endif
// pure sparse path
namespace internal {
template<typename Lhs, typename Rhs, int Mode,
int UpLo = (Mode & Lower)
? Lower
: (Mode & Upper)
? Upper
: -1,
int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
struct sparse_solve_triangular_sparse_selector;
// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode, int UpLo>
struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
typename traits<Rhs>::StorageIndex>::type StorageIndex;
static void run(const Lhs& lhs, Rhs& other)
{
const bool IsLower = (UpLo==Lower);
AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);
tempVector.setBounds(0,other.rows());
Rhs res(other.rows(), other.cols());
res.reserve(other.nonZeros());
for(Index col=0 ; col<other.cols() ; ++col)
{
// FIXME estimate number of non zeros
tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
tempVector.setZero();
tempVector.restart();
for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
{
tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
}
for(Index i=IsLower?0:lhs.cols()-1;
IsLower?i<lhs.cols():i>=0;
i+=IsLower?1:-1)
{
tempVector.restart();
Scalar& ci = tempVector.coeffRef(i);
if (ci!=Scalar(0))
{
// find
typename Lhs::InnerIterator it(lhs, i);
if(!(Mode & UnitDiag))
{
if (IsLower)
{
eigen_assert(it.index()==i);
ci /= it.value();
}
else
ci /= lhs.coeff(i,i);
}
tempVector.restart();
if (IsLower)
{
if (it.index()==i)
++it;
for(; it; ++it)
tempVector.coeffRef(it.index()) -= ci * it.value();
}
else
{
for(; it && it.index()<i; ++it)
tempVector.coeffRef(it.index()) -= ci * it.value();
}
}
}
Index count = 0;
// FIXME compute a reference value to filter zeros
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
{
++ count;
// std::cerr << "fill " << it.index() << ", " << col << "\n";
// std::cout << it.value() << " ";
// FIXME use insertBack
res.insert(it.index(), col) = it.value();
}
// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
}
res.finalize();
other = res.markAsRValue();
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
{
eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
// typedef typename internal::conditional<copy,
// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
// OtherCopy otherCopy(other.derived());
internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());
// if (copy)
// other = otherCopy;
}
#endif
} // end namespace Eigen
#endif // EIGEN_SPARSETRIANGULARSOLVER_H
| 9,657 | 29.563291 | 167 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H
namespace Eigen {
template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
/** \ingroup SparseLU_Module
* \class SparseLU
*
* \brief Sparse supernodal LU factorization for general matrices
*
* This class implements the supernodal LU factorization for general matrices.
* It uses the main techniques from the sequential SuperLU package
  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It transparently handles real
  * and complex arithmetic in single and double precision, depending on the
  * scalar type of your input matrix.
  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
  * It benefits directly from the built-in high-performance Eigen BLAS routines.
  * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to
  * enable better optimization by the compiler. For best performance,
  * you should compile with the NDEBUG flag to avoid the numerous bounds checks on vectors.
*
* An important parameter of this class is the ordering method. It is used to reorder the columns
* (and eventually the rows) of the matrix to reduce the number of new elements that are created during
* numerical factorization. The cheapest method available is COLAMD.
* See \link OrderingMethods_Module the OrderingMethods module \endlink for the list of
* built-in and external ordering methods.
*
* Simple example with key steps
* \code
* VectorXd x(n), b(n);
* SparseMatrix<double, ColMajor> A;
  * SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
* // fill A and b;
* // Compute the ordering permutation vector from the structural pattern of A
* solver.analyzePattern(A);
* // Compute the numerical factorization
* solver.factorize(A);
* //Use the factors to solve the linear system
* x = solver.solve(b);
* \endcode
*
* \warning The input matrix A should be in a \b compressed and \b column-major form.
* Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
*
* \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix.
* For badly scaled matrices, this step can be useful to reduce the pivoting during factorization.
* If this is the case for your matrices, you can try the basic scaling method at
* "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
*
* \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
  * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLAMD.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept
* \sa \ref OrderingMethods_Module
*/
template <typename _MatrixType, typename _OrderingType>
class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>
{
protected:
typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;
using APIBase::m_isInitialized;
public:
using APIBase::_solve_impl;
typedef _MatrixType MatrixType;
typedef _OrderingType OrderingType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;
typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
{
initperfvalues();
}
explicit SparseLU(const MatrixType& matrix)
: m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
{
initperfvalues();
compute(matrix);
}
~SparseLU()
{
// Free all explicit dynamic pointers
}
void analyzePattern (const MatrixType& matrix);
void factorize (const MatrixType& matrix);
void simplicialfactorize(const MatrixType& matrix);
/**
* Compute the symbolic and numeric factorization of the input sparse matrix.
* The input matrix should be in column-major storage.
*/
void compute (const MatrixType& matrix)
{
// Analyze
analyzePattern(matrix);
//Factorize
factorize(matrix);
}
inline Index rows() const { return m_mat.rows(); }
inline Index cols() const { return m_mat.cols(); }
/** Indicate that the pattern of the input matrix is symmetric */
void isSymmetric(bool sym)
{
m_symmetricmode = sym;
}
/** \returns an expression of the matrix L, internally stored as supernodes
* The only operation available with this expression is the triangular solve
* \code
* y = b; matrixL().solveInPlace(y);
* \endcode
*/
SparseLUMatrixLReturnType<SCMatrix> matrixL() const
{
return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
}
/** \returns an expression of the matrix U,
* The only operation available with this expression is the triangular solve
* \code
* y = b; matrixU().solveInPlace(y);
* \endcode
*/
SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const
{
return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
}
/**
    * \returns a reference to the row permutation matrix \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$
* \sa colsPermutation()
*/
inline const PermutationType& rowsPermutation() const
{
return m_perm_r;
}
/**
    * \returns a reference to the column permutation matrix \f$ P_c^T \f$ such that \f$P_r A P_c^T = L U\f$
* \sa rowsPermutation()
*/
inline const PermutationType& colsPermutation() const
{
return m_perm_c;
}
/** Set the threshold used for a diagonal entry to be an acceptable pivot. */
void setPivotThreshold(const RealScalar& thresh)
{
m_diagpivotthresh = thresh;
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
    * \warning the destination matrix X in X = this->solve(B) must be column-major.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;
#endif // EIGEN_PARSED_BY_DOXYGEN
/** \brief Reports whether previous computation was successful.
*
    * \returns \c Success if computation was successful,
    *          \c NumericalIssue if the LU factorization reports a problem, a zero diagonal entry for instance,
    *          \c InvalidInput if the input matrix is invalid
    *
    * \sa lastErrorMessage()
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
/**
* \returns A string describing the type of error
*/
std::string lastErrorMessage() const
{
return m_lastError;
}
template<typename Rhs, typename Dest>
bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
{
Dest& X(X_base.derived());
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
// Permute the right hand side to form X = Pr*B
// on return, X is overwritten by the computed solution
X.resize(B.rows(),B.cols());
// this ugly const_cast_derived() helps to detect aliasing when applying the permutations
for(Index j = 0; j < B.cols(); ++j)
X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
//Forward substitution with L
this->matrixL().solveInPlace(X);
this->matrixU().solveInPlace(X);
// Permute back the solution
for (Index j = 0; j < B.cols(); ++j)
X.col(j) = colsPermutation().inverse() * X.col(j);
return true;
}
/**
    * \returns the absolute value of the determinant of the matrix of which
    * *this is the LU decomposition.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), signDeterminant()
*/
Scalar absDeterminant()
{
using std::abs;
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
// Initialize with the determinant of the row matrix
Scalar det = Scalar(1.);
// Note that the diagonal blocks of U are stored in supernodes,
// which are available in the L part :)
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.index() == j)
{
det *= abs(it.value());
break;
}
}
}
return det;
}
  /** \returns the natural log of the absolute value of the determinant of the matrix
    * of which \c *this is the LU decomposition.
*
* \note This method is useful to work around the risk of overflow/underflow that's
* inherent to the determinant computation.
*
* \sa absDeterminant(), signDeterminant()
*/
Scalar logAbsDeterminant() const
{
using std::log;
using std::abs;
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
Scalar det = Scalar(0.);
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.row() < j) continue;
if(it.row() == j)
{
det += log(abs(it.value()));
break;
}
}
}
return det;
}
/** \returns A number representing the sign of the determinant
*
* \sa absDeterminant(), logAbsDeterminant()
*/
Scalar signDeterminant()
{
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
// Initialize with the determinant of the row matrix
Index det = 1;
// Note that the diagonal blocks of U are stored in supernodes,
// which are available in the L part :)
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.index() == j)
{
if(it.value()<0)
det = -det;
else if(it.value()==0)
return 0;
break;
}
}
}
return det * m_detPermR * m_detPermC;
}
/** \returns The determinant of the matrix.
*
* \sa absDeterminant(), logAbsDeterminant()
*/
Scalar determinant()
{
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize the running product before accumulating U(j,j) over the diagonal
Scalar det = Scalar(1.);
// Note that the diagonal blocks of U are stored in supernodes,
// which are available in the L part :)
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.index() == j)
{
det *= it.value();
break;
}
}
}
return (m_detPermR * m_detPermC) > 0 ? det : -det;
}
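    // Illustrative sketch: when determinant() itself would overflow or underflow,
    // the same value can be reassembled from its pieces (hypothetical `lu`):
    //   Scalar d = lu.signDeterminant() * std::exp(lu.logAbsDeterminant());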
protected:
// Functions
void initperfvalues()
{
m_perfv.panel_size = 16;
m_perfv.relax = 1;
m_perfv.maxsuper = 128;
m_perfv.rowblk = 16;
m_perfv.colblk = 8;
m_perfv.fillfactor = 20;
}
// Variables
mutable ComputationInfo m_info;
bool m_factorizationIsOk;
bool m_analysisIsOk;
std::string m_lastError;
NCMatrix m_mat; // The input (permuted ) matrix
SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore; // The upper triangular matrix
PermutationType m_perm_c; // Column permutation
PermutationType m_perm_r ; // Row permutation
IndexVector m_etree; // Column elimination tree
typename Base::GlobalLU_t m_glu;
// SparseLU options
bool m_symmetricmode;
// values for performance
internal::perfvalues m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
private:
// Disable copy constructor
SparseLU (const SparseLU& );
}; // End class SparseLU
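// Illustrative usage sketch (a hypothetical helper, not part of Eigen): the full
// pipeline exposed by the class above, from symbolic analysis to solve. It
// assumes A is a compressed, column-major sparse matrix and b a dense vector.
template<typename MatrixType, typename Rhs>
Matrix<typename MatrixType::Scalar, Dynamic, 1>
sparselu_solve_sketch(const MatrixType& A, const Rhs& b)
{
  SparseLU<MatrixType> lu;  // default COLAMD fill-reducing ordering
  lu.analyzePattern(A);     // symbolic factorization: ordering and elimination tree
  lu.factorize(A);          // numerical factorization: compute the L and U factors
  eigen_assert(lu.info() == Success && "SparseLU factorization failed");
  return lu.solve(b);       // applies Pr, the two triangular solves, then Pc^-1
}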
// Functions needed by the analysis phase
/**
* Compute the column permutation to minimize the fill-in
*
* - Apply this permutation to the input matrix -
*
* - Compute the column elimination tree on the permuted matrix
*
* - Postorder the elimination tree and the column permutation
*
*/
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{
  //TODO As in SuperLU, it would be possible to compute row and column scaling vectors to equilibrate the matrix mat.
// Firstly, copy the whole input matrix.
m_mat = mat;
// Compute fill-in ordering
OrderingType ord;
ord(m_mat,m_perm_c);
// Apply the permutation to the column of the input matrix
if (m_perm_c.size())
{
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
// Then, permute only the column pointers
ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);
// If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.
if(!mat.isCompressed())
IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);
// Apply the permutation and compute the nnz per column.
for (Index i = 0; i < mat.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
}
// Compute the column elimination tree of the permuted matrix
IndexVector firstRowElt;
internal::coletree(m_mat, m_etree,firstRowElt);
// In symmetric mode, do not do postorder here
if (!m_symmetricmode) {
IndexVector post, iwork;
// Post order etree
internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);
// Renumber etree in postorder
Index m = m_mat.cols();
iwork.resize(m+1);
for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
m_etree = iwork;
    // Postmultiply A*Pc by post, i.e., reorder the matrix according to the postorder of the etree
PermutationType post_perm(m);
for (Index i = 0; i < m; i++)
post_perm.indices()(i) = post(i);
// Combine the two permutations : postorder the permutation for future use
if(m_perm_c.size()) {
m_perm_c = post_perm * m_perm_c;
}
} // end postordering
m_analysisIsOk = true;
}
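// Illustrative sketch (a hypothetical helper, not part of Eigen): analyzePattern()
// only inspects the sparsity pattern, so a sequence of matrices sharing one
// pattern can run the symbolic phase once and then call factorize() alone.
template<typename MatrixType>
void sparselu_refactorize_sketch(SparseLU<MatrixType>& lu,
                                 const MatrixType& A0, const MatrixType& A1)
{
  lu.analyzePattern(A0); // symbolic phase, done once for the shared pattern
  lu.factorize(A0);      // numeric phase for the first set of values
  lu.factorize(A1);      // reuses the analysis; A1 must share A0's pattern
}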
// Functions needed by the numerical factorization phase
/**
* - Numerical factorization
* - Interleaved with the symbolic factorization
* On exit, info is
*
* = 0: successful factorization
*
* > 0: if info = i, and i is
*
* <= A->ncol: U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
*
* > A->ncol: number of bytes allocated when memory allocation
* failure occurred, plus A->ncol. If lwork = -1, it is
* the estimated amount of space needed, plus A->ncol.
*/
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
using internal::emptyIdxLU;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
typedef typename IndexVector::Scalar StorageIndex;
m_isInitialized = true;
  // Apply the column permutation computed in analyzePattern()
// m_mat = matrix * m_perm_c.inverse();
m_mat = matrix;
if (m_perm_c.size())
{
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
//Then, permute only the column pointers
const StorageIndex * outerIndexPtr;
if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
else
{
StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];
for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
outerIndexPtr = outerIndexPtr_t;
}
for (Index i = 0; i < matrix.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
if(!matrix.isCompressed()) delete[] outerIndexPtr;
}
else
{ //FIXME This should not be needed if the empty permutation is handled transparently
m_perm_c.resize(matrix.cols());
for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
}
Index m = m_mat.rows();
Index n = m_mat.cols();
Index nnz = m_mat.nonZeros();
Index maxpanel = m_perfv.panel_size * m;
// Allocate working storage common to the factor routines
Index lwork = 0;
Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
if (info)
{
m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
m_factorizationIsOk = false;
return ;
}
// Set up pointers for integer working arrays
IndexVector segrep(m); segrep.setZero();
IndexVector parent(m); parent.setZero();
IndexVector xplore(m); xplore.setZero();
IndexVector repfnz(maxpanel);
IndexVector panel_lsub(maxpanel);
IndexVector xprune(n); xprune.setZero();
IndexVector marker(m*internal::LUNoMarker); marker.setZero();
repfnz.setConstant(-1);
panel_lsub.setConstant(-1);
// Set up pointers for scalar working arrays
ScalarVector dense;
dense.setZero(maxpanel);
ScalarVector tempv;
tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
// Compute the inverse of perm_c
PermutationType iperm_c(m_perm_c.inverse());
// Identify initial relaxed snodes
IndexVector relax_end(n);
if ( m_symmetricmode == true )
Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
else
Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
m_perm_r.resize(m);
m_perm_r.indices().setConstant(-1);
marker.setConstant(-1);
m_detPermR = 1; // Record the determinant of the row permutation
m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
  // Work on one 'panel' at a time. A panel is one of the following:
  //  (a) a relaxed supernode at the bottom of the etree, or
  //  (b) panel_size contiguous columns, where panel_size is defined by the user
Index jcol;
IndexVector panel_histo(n);
Index pivrow; // Pivotal row number in the original row matrix
Index nseg1; // Number of segments in U-column above panel row jcol
Index nseg; // Number of segments in each U-column
Index irep;
Index i, k, jj;
for (jcol = 0; jcol < n; )
{
// Adjust panel size so that a panel won't overlap with the next relaxed snode.
Index panel_size = m_perfv.panel_size; // upper bound on panel width
for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
{
if (relax_end(k) != emptyIdxLU)
{
panel_size = k - jcol;
break;
}
}
if (k == n)
panel_size = n - jcol;
// Symbolic outer factorization on a panel of columns
Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
// Numeric sup-panel updates in topological order
Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
// Sparse LU within the panel, and below the panel diagonal
for ( jj = jcol; jj< jcol + panel_size; jj++)
{
k = (jj - jcol) * m; // Column index for w-wide arrays
nseg = nseg1; // begin after all the panel segments
//Depth-first-search for the current column
VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
if ( info )
{
m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Numeric updates to this column
VectorBlock<ScalarVector> dense_k(dense, k, m);
VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
if ( info )
{
m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Copy the U-segments to ucol(*)
info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
if ( info )
{
m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Form the L-segment
info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
if ( info )
{
m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
std::ostringstream returnInfo;
returnInfo << info;
m_lastError += returnInfo.str();
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Update the determinant of the row permutation matrix
// FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
if (pivrow != jj) m_detPermR = -m_detPermR;
// Prune columns (0:jj-1) using column jj
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
// Reset repfnz for this column
for (i = 0; i < nseg; i++)
{
irep = segrep(i);
repfnz_k(irep) = emptyIdxLU;
}
} // end SparseLU within the panel
jcol += panel_size; // Move to the next panel
} // end for -- end elimination
m_detPermR = m_perm_r.determinant();
m_detPermC = m_perm_c.determinant();
// Count the number of nonzeros in factors
Base::countnz(n, m_nnzL, m_nnzU, m_glu);
// Apply permutation to the L subscripts
Base::fixupL(n, m_perm_r.indices(), m_glu);
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
// Create the column major upper sparse matrix U;
new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
m_info = Success;
m_factorizationIsOk = true;
}
template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
typedef typename MappedSupernodalType::Scalar Scalar;
explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
{ }
Index rows() { return m_mapL.rows(); }
Index cols() { return m_mapL.cols(); }
template<typename Dest>
void solveInPlace( MatrixBase<Dest> &X) const
{
m_mapL.solveInPlace(X);
}
const MappedSupernodalType& m_mapL;
};
template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
typedef typename MatrixLType::Scalar Scalar;
SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
: m_mapL(mapL),m_mapU(mapU)
{ }
Index rows() { return m_mapL.rows(); }
Index cols() { return m_mapL.cols(); }
template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
{
Index nrhs = X.cols();
Index n = X.rows();
// Backward solve with U
for (Index k = m_mapL.nsuper(); k >= 0; k--)
{
Index fsupc = m_mapL.supToCol()[k];
Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
Index luptr = m_mapL.colIndexPtr()[fsupc];
if (nsupc == 1)
{
for (Index j = 0; j < nrhs; j++)
{
X(fsupc, j) /= m_mapL.valuePtr()[luptr];
}
}
else
{
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}
for (Index j = 0; j < nrhs; ++j)
{
for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
{
typename MatrixUType::InnerIterator it(m_mapU, jcol);
for ( ; it; ++it)
{
Index irow = it.index();
X(irow, j) -= X(jcol, j) * it.value();
}
}
}
} // End For U-solve
}
const MatrixLType& m_mapL;
const MatrixUType& m_mapU;
};
} // End namespace Eigen
#endif
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLUImpl.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef SPARSELU_IMPL_H
#define SPARSELU_IMPL_H
namespace Eigen {
namespace internal {
/** \ingroup SparseLU_Module
* \class SparseLUImpl
* Base class for sparseLU
*/
template <typename Scalar, typename StorageIndex>
class SparseLUImpl
{
public:
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> ScalarMatrix;
typedef Map<ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock;
typedef typename ScalarVector::RealScalar RealScalar;
typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
typedef Ref<Matrix<StorageIndex,Dynamic,1> > BlockIndexVector;
typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> MatrixType;
protected:
template <typename VectorType>
Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions);
Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu);
template <typename VectorType>
Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions);
void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
template <typename Traits>
void dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);
void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu);
Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu);
void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu);
template<typename , typename >
friend struct column_dfs_traits;
};
} // end namespace internal
} // namespace Eigen
#endif
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_Memory.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU
* -- SuperLU routine (version 3.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 1, 2008
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef EIGEN_SPARSELU_MEMORY
#define EIGEN_SPARSELU_MEMORY
namespace Eigen {
namespace internal {
enum { LUNoMarker = 3 };
enum {emptyIdxLU = -1};
inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)
{
return (std::max)(m, (t+b)*w);
}
template< typename Scalar>
inline Index LUTempSpace(Index&m, Index& w)
{
return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
}
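// For example, with w = panel_size and the defaults used by SparseLU
// (panel_size = 16, LUNoMarker = 3), LUTempSpace<Scalar>(m, w) amounts to
// (2*16 + 4 + 3) * m * sizeof(Index) + 17 * m * sizeof(Scalar) bytes.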
/**
 * Expand the existing storage to accommodate more fill-ins
 * \param vec Valid pointer to the vector to allocate or expand
 * \param[in,out] length On input, contains the current length of the vector to be increased. On output, the length of the newly allocated vector
* \param[in] nbElts Current number of elements in the factors
* \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand
* \param[in,out] num_expansions Number of times the memory has been expanded
*/
template <typename Scalar, typename StorageIndex>
template <typename VectorType>
Index SparseLUImpl<Scalar,StorageIndex>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions)
{
float alpha = 1.5; // Ratio of the memory increase
Index new_len; // New size of the allocated memory
if(num_expansions == 0 || keep_prev)
    new_len = length ; // First allocation: use the requested length
else
new_len = (std::max)(length+1,Index(alpha * length));
VectorType old_vec; // Temporary vector to hold the previous values
if (nbElts > 0 )
old_vec = vec.segment(0,nbElts);
//Allocate or expand the current vector
#ifdef EIGEN_EXCEPTIONS
try
#endif
{
vec.resize(new_len);
}
#ifdef EIGEN_EXCEPTIONS
catch(std::bad_alloc& )
#else
if(!vec.size())
#endif
{
if (!num_expansions)
{
// First time to allocate from LUMemInit()
      // Let LUMemInit() deal with it.
return -1;
}
if (keep_prev)
{
      // In this case, the memory length should not be reduced
return new_len;
}
else
{
// Reduce the size and increase again
Index tries = 0; // Number of attempts
do
{
alpha = (alpha + 1)/2;
new_len = (std::max)(length+1,Index(alpha * length));
#ifdef EIGEN_EXCEPTIONS
try
#endif
{
vec.resize(new_len);
}
#ifdef EIGEN_EXCEPTIONS
catch(std::bad_alloc& )
#else
if (!vec.size())
#endif
{
tries += 1;
if ( tries > 10) return new_len;
}
} while (!vec.size());
}
}
//Copy the previous values to the newly allocated space
if (nbElts > 0)
vec.segment(0, nbElts) = old_vec;
length = new_len;
if(num_expansions) ++num_expansions;
return 0;
}
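// Illustrative sketch (a hypothetical helper, not part of SuperLU/Eigen): the
// growth policy of expand() in isolation. A successful expansion grows the
// storage by roughly alpha = 1.5; after a failed allocation, alpha decays
// towards 1 so that the requested size shrinks until it fits.
inline Index lu_next_length_sketch(Index length, float& alpha)
{
  Index new_len = (std::max)(length+1, Index(alpha * length)); // same formula as expand()
  alpha = (alpha + 1)/2; // the decay step expand() applies after a failed resize
  return new_len;
}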
/**
* \brief Allocate various working space for the numerical factorization phase.
* \param m number of rows of the input matrix
* \param n number of columns
* \param annz number of initial nonzeros in the matrix
* \param lwork if lwork=-1, this routine returns an estimated size of the required memory
* \param glu persistent data to facilitate multiple factors : will be deleted later ??
* \param fillratio estimated ratio of fill in the factors
* \param panel_size Size of a panel
 * \return the estimated size of the required memory if lwork = -1; otherwise, the size of the memory actually allocated when allocation failed, and 0 on success
* \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu)
{
Index& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U
glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor
// Return the estimated size to the user if necessary
Index tempSpace;
tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
if (lwork == emptyIdxLU)
{
Index estimated_size;
estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace
+ (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n;
return estimated_size;
}
// Setup the required space
// First allocate Integer pointers for L\U factors
glu.xsup.resize(n+1);
glu.supno.resize(n+1);
glu.xlsub.resize(n+1);
glu.xlusup.resize(n+1);
glu.xusub.resize(n+1);
// Reserve memory for L/U factors
do
{
if( (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)
|| (expand<ScalarVector>(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0)
|| (expand<IndexVector> (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0)
|| (expand<IndexVector> (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) )
{
//Reduce the estimated size and retry
glu.nzlumax /= 2;
glu.nzumax /= 2;
glu.nzlmax /= 2;
if (glu.nzlumax < annz ) return glu.nzlumax;
}
} while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
++num_expansions;
return 0;
} // end LuMemInit
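// Illustrative sketch: the lwork == -1 (i.e. emptyIdxLU) convention documented
// above turns memInit() into a pure size query (all names as in this file):
//   Index estimate = memInit(m, n, annz, /*lwork=*/emptyIdxLU, fillratio, panel_size, glu);
//   // 'estimate' is, in bytes, the memory memInit() would attempt to allocate.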
/**
* \brief Expand the existing storage
* \param vec vector to expand
 * \param[in,out] maxlen On input, the previous size of vec (number of elements to copy). On output, the new size
* \param nbElts current number of elements in the vector.
* \param memtype Type of the element to expand
* \param num_expansions Number of expansions
* \return 0 on success, > 0 size of the memory allocated so far
*/
template <typename Scalar, typename StorageIndex>
template <typename VectorType>
Index SparseLUImpl<Scalar,StorageIndex>::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions)
{
Index failed_size;
if (memtype == USUB)
failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
else
failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
if (failed_size)
return failed_size;
return 0 ;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSELU_MEMORY
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_Structs.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h
* -- SuperLU routine (version 4.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November, 2010
*
* Global data structures used in LU factorization -
*
* nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
* (xsup,supno): supno[i] is the supernode no to which i belongs;
* xsup(s) points to the beginning of the s-th supernode.
* e.g. supno 0 1 2 2 3 3 3 4 4 4 4 4 (n=12)
* xsup 0 1 2 4 7 12
* Note: dfs will be performed on supernode rep. relative to the new
* row pivoting ordering
*
* (xlsub,lsub): lsub[*] contains the compressed subscript of
* rectangular supernodes; xlsub[j] points to the starting
* location of the j-th column in lsub[*]. Note that xlsub
* is indexed by column.
* Storage: original row subscripts
*
* During the course of sparse LU factorization, we also use
* (xlsub,lsub) for the purpose of symmetric pruning. For each
* supernode {s,s+1,...,t=s+r} with first column s and last
* column t, the subscript set
* lsub[j], j=xlsub[s], .., xlsub[s+1]-1
* is the structure of column s (i.e. structure of this supernode).
* It is used for the storage of numerical values.
* Furthermore,
* lsub[j], j=xlsub[t], .., xlsub[t+1]-1
* is the structure of the last column t of this supernode.
* It is for the purpose of symmetric pruning. Therefore, the
* structural subscripts can be rearranged without making physical
* interchanges among the numerical values.
*
* However, if the supernode has only one column, then we
* only keep one set of subscripts. For any subscript interchange
* performed, similar interchange must be done on the numerical
* values.
*
* The last column structures (for pruning) will be removed
 * after the numerical LU factorization phase.
*
* (xlusup,lusup): lusup[*] contains the numerical values of the
* rectangular supernodes; xlusup[j] points to the starting
* location of the j-th column in storage vector lusup[*]
* Note: xlusup is indexed by column.
* Each rectangular supernode is stored by column-major
* scheme, consistent with Fortran 2-dim array storage.
*
* (xusub,ucol,usub): ucol[*] stores the numerical values of
* U-columns outside the rectangular supernodes. The row
* subscript of nonzero ucol[k] is stored in usub[k].
* xusub[i] points to the starting location of column i in ucol.
* Storage: new row subscripts; that is subscripts of PA.
*/
#ifndef EIGEN_LU_STRUCTS
#define EIGEN_LU_STRUCTS
namespace Eigen {
namespace internal {
typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
template <typename IndexVector, typename ScalarVector>
struct LU_GlobalLU_t {
typedef typename IndexVector::Scalar StorageIndex;
IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode
IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
ScalarVector lusup; // nonzero values of L ordered by columns
IndexVector lsub; // Compressed row indices of L rectangular supernodes.
IndexVector xlusup; // pointers to the beginning of each column in lusup
IndexVector xlsub; // pointers to the beginning of each column in lsub
Index nzlmax; // Current max size of lsub
Index nzlumax; // Current max size of lusup
ScalarVector ucol; // nonzero values of U ordered by columns
IndexVector usub; // row indices of U columns in ucol
IndexVector xusub; // Pointers to the beginning of each column of U in ucol
Index nzumax; // Current max size of ucol
Index n; // Number of columns in the matrix
Index num_expansions;
};
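// Illustrative sketch (a hypothetical helper): reading the (xsup,supno) mapping
// described in the header comment above. For the example given there
// (xsup = 0 1 2 4 7 12), widthOfSupernode(glu, 3) == 7 - 4 == 3.
template <typename IndexVector, typename ScalarVector>
Index widthOfSupernode(const LU_GlobalLU_t<IndexVector,ScalarVector>& glu, Index s)
{
  // xsup(s) is the first column of supernode s; xsup(s+1) is one past its last
  return glu.xsup(s+1) - glu.xsup(s);
}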
// Values to set for performance
struct perfvalues {
Index panel_size; // a panel consists of at most <panel_size> consecutive columns
Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
// in a subtree of the elimination tree is less than relax, this subtree is considered
// as one supernode regardless of the row structures of those columns
Index maxsuper; // The maximum size for a supernode in complete LU
Index rowblk; // The minimum row dimension for 2-D blocking to be used;
Index colblk; // The minimum column dimension for 2-D blocking to be used;
Index fillfactor; // The estimated fills factors for L and U, compared with A
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LU_STRUCTS
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
namespace Eigen {
namespace internal {
/** \ingroup SparseLU_Module
* \brief a class to manipulate the L supernodal factor from the SparseLU factorization
*
 * This class contains the data needed to easily store
* and manipulate the supernodes during the factorization and solution phase of Sparse LU.
* Only the lower triangular matrix has supernodes.
*
* NOTE : This class corresponds to the SCformat structure in SuperLU
*
*/
/* TODO
* InnerIterator as for sparsematrix
* SuperInnerIterator to iterate through all supernodes
* Function for triangular solve
*/
template <typename _Scalar, typename _StorageIndex>
class MappedSuperNodalMatrix
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
public:
MappedSuperNodalMatrix()
{
}
MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
}
~MappedSuperNodalMatrix()
{
}
/**
* Set appropriate pointers for the lower triangular supernodal matrix
   * This information is available at the end of the numerical factorization.
   * FIXME This class will be modified such that it can be used in the course
* of the factorization.
*/
void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
m_row = m;
m_col = n;
m_nzval = nzval.data();
m_nzval_colptr = nzval_colptr.data();
m_rowind = rowind.data();
m_rowind_colptr = rowind_colptr.data();
m_nsuper = col_to_sup(n);
m_col_to_sup = col_to_sup.data();
m_sup_to_col = sup_to_col.data();
}
/**
* Number of rows
*/
Index rows() { return m_row; }
/**
* Number of columns
*/
Index cols() { return m_col; }
/**
* Return the array of nonzero values packed by column
*
* The size is nnz
*/
Scalar* valuePtr() { return m_nzval; }
const Scalar* valuePtr() const
{
return m_nzval;
}
/**
* Return the pointers to the beginning of each column in \ref valuePtr()
*/
StorageIndex* colIndexPtr()
{
return m_nzval_colptr;
}
const StorageIndex* colIndexPtr() const
{
return m_nzval_colptr;
}
/**
* Return the array of compressed row indices of all supernodes
*/
StorageIndex* rowIndex() { return m_rowind; }
const StorageIndex* rowIndex() const
{
return m_rowind;
}
/**
   * Return the location in \ref rowIndex() which starts each column
*/
StorageIndex* rowIndexPtr() { return m_rowind_colptr; }
const StorageIndex* rowIndexPtr() const
{
return m_rowind_colptr;
}
/**
* Return the array of column-to-supernode mapping
*/
StorageIndex* colToSup() { return m_col_to_sup; }
const StorageIndex* colToSup() const
{
return m_col_to_sup;
}
/**
* Return the array of supernode-to-column mapping
*/
StorageIndex* supToCol() { return m_sup_to_col; }
const StorageIndex* supToCol() const
{
return m_sup_to_col;
}
/**
* Return the number of supernodes
*/
Index nsuper() const
{
return m_nsuper;
}
class InnerIterator;
template<typename Dest>
void solveInPlace( MatrixBase<Dest>&X) const;
protected:
Index m_row; // Number of rows
Index m_col; // Number of columns
Index m_nsuper; // Number of supernodes
Scalar* m_nzval; //array of nonzero values packed by column
StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j
StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes
StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j
StorageIndex* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs
StorageIndex* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode
private :
};
/**
* \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L
*
*/
template<typename Scalar, typename StorageIndex>
class MappedSuperNodalMatrix<Scalar,StorageIndex>::InnerIterator
{
public:
InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_supno(mat.colToSup()[outer]),
m_idval(mat.colIndexPtr()[outer]),
m_startidval(m_idval),
m_endidval(mat.colIndexPtr()[outer+1]),
m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]),
m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1])
{}
inline InnerIterator& operator++()
{
m_idval++;
m_idrow++;
return *this;
}
inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_idval]); }
inline Index index() const { return m_matrix.rowIndex()[m_idrow]; }
inline Index row() const { return index(); }
inline Index col() const { return m_outer; }
inline Index supIndex() const { return m_supno; }
inline operator bool() const
{
return ( (m_idval < m_endidval) && (m_idval >= m_startidval)
&& (m_idrow < m_endidrow) );
}
protected:
const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix
const Index m_outer; // Current column
const Index m_supno; // Current SuperNode number
Index m_idval; // Index to browse the values in the current column
const Index m_startidval; // Start of the column value
const Index m_endidval; // End of the column value
Index m_idrow; // Index to browse the row indices
Index m_endidrow; // End index of row indices of the current column
};
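// Illustrative sketch (a hypothetical helper, not part of Eigen): counting the
// entries stored for column j of the supernodal factor with the iterator above.
template<typename Scalar, typename StorageIndex>
Index columnNonZerosSketch(const MappedSuperNodalMatrix<Scalar,StorageIndex>& mat, Index j)
{
  Index count = 0;
  typename MappedSuperNodalMatrix<Scalar,StorageIndex>::InnerIterator it(mat, j);
  for (; it; ++it) ++count; // it.row() / it.value() expose each stored entry
  return count;
}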
/**
* \brief Solve with the supernode triangular matrix
*
*/
template<typename Scalar, typename Index_>
template<typename Dest>
void MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) const
{
/* Explicit type conversion as the Index type of MatrixBase<Dest> may be wider than Index */
// eigen_assert(X.rows() <= NumTraits<Index>::highest());
// eigen_assert(X.cols() <= NumTraits<Index>::highest());
Index n = int(X.rows());
Index nrhs = Index(X.cols());
const Scalar * Lval = valuePtr(); // Nonzero values
Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor> work(n, nrhs); // working vector
work.setZero();
for (Index k = 0; k <= nsuper(); k ++)
{
Index fsupc = supToCol()[k]; // First column of the current supernode
Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column
Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode
Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode
Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode
Index irow; //Current index row
if (nsupc == 1 )
{
for (Index j = 0; j < nrhs; j++)
{
InnerIterator it(*this, fsupc);
++it; // Skip the diagonal element
for (; it; ++it)
{
irow = it.row();
X(irow, j) -= X(fsupc, j) * it.value();
}
}
}
else
{
// The supernode has more than one column
Index luptr = colIndexPtr()[fsupc];
Index lda = colIndexPtr()[fsupc+1] - luptr;
// Triangular solve
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<UnitLower>().solve(U);
// Matrix-vector product
new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
work.topRows(nrow).noalias() = A * U;
//Begin Scatter
for (Index j = 0; j < nrhs; j++)
{
Index iptr = istart + nsupc;
for (Index i = 0; i < nrow; i++)
{
irow = rowIndex()[iptr];
X(irow, j) -= work(i, j); // Scatter operation
work(i, j) = Scalar(0);
iptr++;
}
}
}
}
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSELU_MATRIX_H
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_Utils.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSELU_UTILS_H
#define EIGEN_SPARSELU_UTILS_H
namespace Eigen {
namespace internal {
/**
 * \brief Count the nonzero elements in the factors
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)
{
nnzL = 0;
nnzU = (glu.xusub)(n);
Index nsuper = (glu.supno)(n);
Index jlen;
Index i, j, fsupc;
if (n <= 0 ) return;
// For each supernode
for (i = 0; i <= nsuper; i++)
{
fsupc = glu.xsup(i);
jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
for (j = fsupc; j < glu.xsup(i+1); j++)
{
nnzL += jlen;
nnzU += j - fsupc + 1;
jlen--;
}
}
}
/**
* \brief Fix up the data storage lsub for L-subscripts.
*
 * It removes the subscript sets used for structural pruning,
* and applies permutation to the remaining subscripts
*
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)
{
Index fsupc, i, j, k, jstart;
StorageIndex nextl = 0;
Index nsuper = (glu.supno)(n);
// For each supernode
for (i = 0; i <= nsuper; i++)
{
fsupc = glu.xsup(i);
jstart = glu.xlsub(fsupc);
glu.xlsub(fsupc) = nextl;
for (j = jstart; j < glu.xlsub(fsupc + 1); j++)
{
glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A
nextl++;
}
for (k = fsupc+1; k < glu.xsup(i+1); k++)
glu.xlsub(k) = nextl; // other columns in supernode i
}
glu.xlsub(n) = nextl;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSELU_UTILS_H
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_column_bmod.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_COLUMN_BMOD_H
#define SPARSELU_COLUMN_BMOD_H
namespace Eigen {
namespace internal {
/**
* \brief Performs numeric block updates (sup-col) in topological order
*
* \param jcol current column to update
* \param nseg Number of segments in the U part
* \param dense Store the full representation of the column
* \param tempv working array
* \param segrep segment representative ...
* \param repfnz ??? First nonzero column in each row ??? ...
* \param fpanelc First column in the current panel
* \param glu Global LU data.
* \return 0 - successful return
* > 0 - number of bytes allocated when run out of space
*
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv,
BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)
{
Index jsupno, k, ksub, krep, ksupno;
Index lptr, nrow, isub, irow, nextlu, new_next, ufirst;
Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
/* krep = representative of current k-th supernode
* fsupc = first supernodal column
* nsupc = number of columns in a supernode
* nsupr = number of rows in a supernode
* luptr = location of supernodal LU-block in storage
* kfnz = first nonz in the k-th supernodal segment
   * no_zeros = number of leading zeros in a supernodal U-segment
*/
jsupno = glu.supno(jcol);
// For each nonzero supernode segment of U[*,j] in topological order
k = nseg - 1;
Index d_fsupc; // distance between the first column of the current panel and the
// first column of the current snode
Index fst_col; // First column within small LU update
Index segsize;
for (ksub = 0; ksub < nseg; ksub++)
{
krep = segrep(k); k--;
ksupno = glu.supno(krep);
if (jsupno != ksupno )
{
// outside the rectangular supernode
fsupc = glu.xsup(ksupno);
fst_col = (std::max)(fsupc, fpanelc);
// Distance from the current supernode to the current panel;
// d_fsupc = 0 if fsupc > fpanelc
d_fsupc = fst_col - fsupc;
luptr = glu.xlusup(fst_col) + d_fsupc;
lptr = glu.xlsub(fsupc) + d_fsupc;
kfnz = repfnz(krep);
kfnz = (std::max)(kfnz, fpanelc);
segsize = krep - kfnz + 1;
nsupc = krep - fst_col + 1;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
nrow = nsupr - d_fsupc - nsupc;
Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
// Perform a triangular solver and block update,
// then scatter the result of sup-col update to dense
no_zeros = kfnz - fst_col;
if(segsize==1)
LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
else
LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
} // end if jsupno
} // end for each segment
// Process the supernodal portion of L\U[*,j]
nextlu = glu.xlusup(jcol);
fsupc = glu.xsup(jsupno);
// copy the SPA dense into L\U[*,j]
Index mem;
new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc);
Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
if(offset)
new_next += offset;
while (new_next > glu.nzlumax )
{
mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
if (mem) return mem;
}
for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)
{
irow = glu.lsub(isub);
glu.lusup(nextlu) = dense(irow);
dense(irow) = Scalar(0.0);
++nextlu;
}
if(offset)
{
glu.lusup.segment(nextlu,offset).setZero();
nextlu += offset;
}
glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol);
/* For more updates within the panel (also within the current supernode),
* should start from the first column of the panel, or the first column
* of the supernode, whichever is bigger. There are two cases:
* 1) fsupc < fpanelc, then fst_col <-- fpanelc
* 2) fsupc >= fpanelc, then fst_col <-- fsupc
*/
fst_col = (std::max)(fsupc, fpanelc);
if (fst_col < jcol)
{
// Distance between the current supernode and the current panel
// d_fsupc = 0 if fsupc >= fpanelc
d_fsupc = fst_col - fsupc;
lptr = glu.xlsub(fsupc) + d_fsupc;
luptr = glu.xlusup(fst_col) + d_fsupc;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension
nsupc = jcol - fst_col; // excluding jcol
nrow = nsupr - d_fsupc - nsupc;
// points to the beginning of jcol in snode L\U(jsupno)
ufirst = glu.xlusup(jcol) + d_fsupc;
Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
u = A.template triangularView<UnitLower>().solve(u);
new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
l.noalias() -= A * u;
} // End if fst_col
return 0;
}
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_COLUMN_BMOD_H
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_column_dfs.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU
* -- SuperLU routine (version 2.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November 15, 1997
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_COLUMN_DFS_H
#define SPARSELU_COLUMN_DFS_H
template <typename Scalar, typename StorageIndex> class SparseLUImpl;
namespace Eigen {
namespace internal {
template<typename IndexVector, typename ScalarVector>
struct column_dfs_traits : no_assignment_operator
{
typedef typename ScalarVector::Scalar Scalar;
typedef typename IndexVector::Scalar StorageIndex;
column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl<Scalar, StorageIndex>::GlobalLU_t& glu, SparseLUImpl<Scalar, StorageIndex>& luImpl)
: m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl)
{}
bool update_segrep(Index /*krep*/, Index /*jj*/)
{
return true;
}
void mem_expand(IndexVector& lsub, Index& nextl, Index chmark)
{
if (nextl >= m_glu.nzlmax)
m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions);
if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU;
}
enum { ExpandMem = true };
Index m_jcol;
Index& m_jsuper_ref;
typename SparseLUImpl<Scalar, StorageIndex>::GlobalLU_t& m_glu;
SparseLUImpl<Scalar, StorageIndex>& m_luImpl;
};
/**
 * \brief Performs a symbolic factorization on column jcol and decides the supernode boundary
*
* A supernode representative is the last column of a supernode.
* The nonzeros in U[*,j] are segments that end at supernodes representatives.
* The routine returns a list of the supernodal representatives
* in topological order of the dfs that generates them.
* The location of the first nonzero in each supernodal segment
* (supernodal entry location) is also returned.
*
* \param m number of rows in the matrix
* \param jcol Current column
* \param perm_r Row permutation
* \param maxsuper Maximum number of column allowed in a supernode
* \param [in,out] nseg Number of segments in current U[*,j] - new segments appended
* \param lsub_col defines the rhs vector to start the dfs
* \param [in,out] segrep Segment representatives - new segments appended
* \param repfnz First nonzero location in each row
* \param xprune
* \param marker marker[i] == jj, if i was visited during dfs of current column jj;
* \param parent
* \param xplore working array
* \param glu global LU data
* \return 0 success
* > 0 number of bytes allocated when run out of space
*
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg,
BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune,
IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
Index jsuper = glu.supno(jcol);
Index nextl = glu.xlsub(jcol);
VectorBlock<IndexVector> marker2(marker, 2*m, m);
column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
// For each nonzero in A(*,jcol) do dfs
for (Index k = 0; ((k < m) ? lsub_col[k] != emptyIdxLU : false) ; k++)
{
Index krow = lsub_col(k);
lsub_col(k) = emptyIdxLU;
Index kmark = marker2(krow);
// krow was visited before, go to the next nonz;
if (kmark == jcol) continue;
dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
xplore, glu, nextl, krow, traits);
} // for each nonzero ...
Index fsupc;
StorageIndex nsuper = glu.supno(jcol);
StorageIndex jcolp1 = StorageIndex(jcol) + 1;
Index jcolm1 = jcol - 1;
// check to see if j belongs in the same supernode as j-1
if ( jcol == 0 )
{ // Do nothing for column 0
nsuper = glu.supno(0) = 0 ;
}
else
{
fsupc = glu.xsup(nsuper);
StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed
StorageIndex jm1ptr = glu.xlsub(jcolm1);
// Use supernodes of type T2 : see SuperLU paper
if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
// Make sure the number of columns in a supernode doesn't
// exceed threshold
if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU;
/* If jcol starts a new supernode, reclaim storage space in
* glu.lsub from previous supernode. Note we only store
* the subscript set of the first and last columns of
* a supernode. (first for num values, last for pruning)
*/
if (jsuper == emptyIdxLU)
{ // starts a new supernode
if ( (fsupc < jcolm1-1) )
{ // >= 3 columns in nsuper
StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito;
StorageIndex istop = ito + jptr - jm1ptr;
          xprune(jcolm1) = istop; // initialize xprune(jcol-1)
glu.xlsub(jcol) = istop;
for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
glu.lsub(ito) = glu.lsub(ifrom);
nextl = ito; // = istop + length(jcol)
}
nsuper++;
glu.supno(jcol) = nsuper;
} // if a new supernode
} // end else: jcol > 0
// Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper;
  xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
glu.xlsub(jcolp1) = StorageIndex(nextl);
return 0;
}
} // end namespace internal
} // end namespace Eigen
#endif
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU
* -- SuperLU routine (version 2.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November 15, 1997
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_COPY_TO_UCOL_H
#define SPARSELU_COPY_TO_UCOL_H
namespace Eigen {
namespace internal {
/**
* \brief Performs numeric block updates (sup-col) in topological order
*
* \param jcol current column to update
* \param nseg Number of segments in the U part
* \param segrep segment representative ...
* \param repfnz First nonzero column in each row ...
* \param perm_r Row permutation
* \param dense Store the full representation of the column
* \param glu Global LU data.
* \return 0 - successful return
* > 0 - number of bytes allocated when run out of space
*
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep,
BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
{
Index ksub, krep, ksupno;
Index jsupno = glu.supno(jcol);
// For each nonzero supernode segment of U[*,j] in topological order
Index k = nseg - 1, i;
StorageIndex nextu = glu.xusub(jcol);
Index kfnz, isub, segsize;
Index new_next,irow;
Index fsupc, mem;
for (ksub = 0; ksub < nseg; ksub++)
{
krep = segrep(k); k--;
ksupno = glu.supno(krep);
if (jsupno != ksupno ) // should go into ucol();
{
kfnz = repfnz(krep);
if (kfnz != emptyIdxLU)
{ // Nonzero U-segment
fsupc = glu.xsup(ksupno);
isub = glu.xlsub(fsupc) + kfnz - fsupc;
segsize = krep - kfnz + 1;
new_next = nextu + segsize;
while (new_next > glu.nzumax)
{
mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions);
if (mem) return mem;
mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
if (mem) return mem;
}
for (i = 0; i < segsize; i++)
{
irow = glu.lsub(isub);
glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order
glu.ucol(nextu) = dense(irow);
dense(irow) = Scalar(0.0);
nextu++;
isub++;
}
} // end nonzero U-segment
} // end if jsupno
} // end for each segment
glu.xusub(jcol + 1) = nextu; // close U(*,jcol)
return 0;
}
} // namespace internal
} // end namespace Eigen
#endif // SPARSELU_COPY_TO_UCOL_H
--- abess | abess-master/python/include/Eigen/src/SparseLU/SparseLU_gemm_kernel.h ---
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
#define EIGEN_SPARSELU_GEMM_KERNEL_H
namespace Eigen {
namespace internal {
/** \internal
* A general matrix-matrix product kernel optimized for the SparseLU factorization.
* - A, B, and C must be column major
* - lda and ldc must be multiples of the respective packet size
* - C must have the same alignment as A
*/
template<typename Scalar>
EIGEN_DONT_INLINE
void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
{
using namespace Eigen::internal;
typedef typename packet_traits<Scalar>::type Packet;
enum {
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
PacketSize = packet_traits<Scalar>::size,
PM = 8, // peeling in M
RN = 2, // register blocking
RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
SM = PM*PacketSize // step along M
};
Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking
Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once
Index i0 = internal::first_default_aligned(A,m);
eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));
// handle the non aligned rows of A and C without any optimization:
for(Index i=0; i<i0; ++i)
{
for(Index j=0; j<n; ++j)
{
Scalar c = C[i+j*ldc];
for(Index k=0; k<d; ++k)
c += B[k+j*ldb] * A[i+k*lda];
C[i+j*ldc] = c;
}
}
// process the remaining rows per chunk of BM rows
for(Index ib=i0; ib<m; ib+=BM)
{
Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows
Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling
Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization
// Let's process two columns of B-C at once
for(Index j=0; j<n_end; j+=RN)
{
const Scalar* Bc0 = B+(j+0)*ldb;
const Scalar* Bc1 = B+(j+1)*ldb;
for(Index k=0; k<d_end; k+=RK)
{
// load and expand a RN x RK block of B
Packet b00, b10, b20, b30, b01, b11, b21, b31;
{ b00 = pset1<Packet>(Bc0[0]); }
{ b10 = pset1<Packet>(Bc0[1]); }
if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }
if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }
{ b01 = pset1<Packet>(Bc1[0]); }
{ b11 = pset1<Packet>(Bc1[1]); }
if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }
if(RK==4) { b31 = pset1<Packet>(Bc1[3]); }
Packet a0, a1, a2, a3, c0, c1, t0, t1;
const Scalar* A0 = A+ib+(k+0)*lda;
const Scalar* A1 = A+ib+(k+1)*lda;
const Scalar* A2 = A+ib+(k+2)*lda;
const Scalar* A3 = A+ib+(k+3)*lda;
Scalar* C0 = C+ib+(j+0)*ldc;
Scalar* C1 = C+ib+(j+1)*ldc;
a0 = pload<Packet>(A0);
a1 = pload<Packet>(A1);
if(RK==4)
{
a2 = pload<Packet>(A2);
a3 = pload<Packet>(A3);
}
else
{
// workaround "may be used uninitialized in this function" warning
a2 = a3 = a0;
}
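          // The two helper macros below emulate a fused multiply-add
          // (c += a*b, using tmp as scratch) and one vectorized step of the
          // rank-RK update applied to a pair of C columns.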
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
#define WORK(I) \
c0 = pload<Packet>(C0+i+(I)*PacketSize); \
c1 = pload<Packet>(C1+i+(I)*PacketSize); \
KMADD(c0, a0, b00, t0) \
KMADD(c1, a0, b01, t1) \
a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
KMADD(c0, a1, b10, t0) \
KMADD(c1, a1, b11, t1) \
a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
if(RK==4){ KMADD(c0, a2, b20, t0) }\
if(RK==4){ KMADD(c1, a2, b21, t1) }\
if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
if(RK==4){ KMADD(c0, a3, b30, t0) }\
if(RK==4){ KMADD(c1, a3, b31, t1) }\
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0); \
pstore(C1+i+(I)*PacketSize, c1)
// process rows of A' - C' with aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
prefetch((A0+i+(5)*PacketSize));
prefetch((A1+i+(5)*PacketSize));
if(RK==4) prefetch((A2+i+(5)*PacketSize));
if(RK==4) prefetch((A3+i+(5)*PacketSize));
WORK(0);
WORK(1);
WORK(2);
WORK(3);
WORK(4);
WORK(5);
WORK(6);
WORK(7);
}
// process the remaining rows with vectorization only
for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
{
WORK(0);
}
#undef WORK
// process the remaining rows without vectorization
for(Index i=actual_b_end2; i<actual_b; ++i)
{
if(RK==4)
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
}
else
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
}
}
Bc0 += RK;
Bc1 += RK;
} // peeled loop on k
} // peeled loop on the columns j
// process the last column (we now perform a matrix-vector product)
if((n-n_end)>0)
{
const Scalar* Bc0 = B+(n-1)*ldb;
for(Index k=0; k<d_end; k+=RK)
{
// load and expand a 1 x RK block of B
Packet b00, b10, b20, b30;
b00 = pset1<Packet>(Bc0[0]);
b10 = pset1<Packet>(Bc0[1]);
if(RK==4) b20 = pset1<Packet>(Bc0[2]);
if(RK==4) b30 = pset1<Packet>(Bc0[3]);
Packet a0, a1, a2, a3, c0, t0/*, t1*/;
const Scalar* A0 = A+ib+(k+0)*lda;
const Scalar* A1 = A+ib+(k+1)*lda;
const Scalar* A2 = A+ib+(k+2)*lda;
const Scalar* A3 = A+ib+(k+3)*lda;
Scalar* C0 = C+ib+(n_end)*ldc;
a0 = pload<Packet>(A0);
a1 = pload<Packet>(A1);
if(RK==4)
{
a2 = pload<Packet>(A2);
a3 = pload<Packet>(A3);
}
else
{
// workaround "may be used uninitialized in this function" warning
a2 = a3 = a0;
}
#define WORK(I) \
c0 = pload<Packet>(C0+i+(I)*PacketSize); \
KMADD(c0, a0, b00, t0) \
a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
KMADD(c0, a1, b10, t0) \
a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
if(RK==4){ KMADD(c0, a2, b20, t0) }\
if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
if(RK==4){ KMADD(c0, a3, b30, t0) }\
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0);
        // aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
WORK(0);
WORK(1);
WORK(2);
WORK(3);
WORK(4);
WORK(5);
WORK(6);
WORK(7);
}
// vectorization only
for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
{
WORK(0);
}
// remaining scalars
for(Index i=actual_b_end2; i<actual_b; ++i)
{
if(RK==4)
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
else
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
}
Bc0 += RK;
#undef WORK
}
}
// process the last columns of A, corresponding to the last rows of B
Index rd = d-d_end;
if(rd>0)
{
for(Index j=0; j<n; ++j)
{
enum {
Alignment = PacketSize>1 ? Aligned : 0
};
typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
+ B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
}
}
} // blocking on the rows of A and C
}
#undef KMADD
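// [Illustrative sketch -- not part of the original source.] A plain scalar
// reference for the product computed by sparselu_gemm above, i.e.
// C(0:m,0:n) += A(0:m,0:d) * B(0:d,0:n) with column-major storage and
// leading dimensions lda/ldb/ldc. Useful as a correctness baseline when
// modifying the register-blocked/peeled kernel. Kept inactive:
#if 0
template<typename Scalar>
void sparselu_gemm_ref(Index m, Index n, Index d,
                       const Scalar* A, Index lda,
                       const Scalar* B, Index ldb,
                       Scalar* C, Index ldc)
{
  for (Index j = 0; j < n; ++j)
    for (Index k = 0; k < d; ++k)
    {
      const Scalar b = B[k + j*ldb];
      for (Index i = 0; i < m; ++i)
        C[i + j*ldc] += A[i + k*lda] * b;
    }
}
#endif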
} // namespace internal
} // namespace Eigen
#endif // EIGEN_SPARSELU_GEMM_KERNEL_H
| 10,216 | 35.359431 | 123 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/* This file is a modified version of heap_relax_snode.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_HEAP_RELAX_SNODE_H
#define SPARSELU_HEAP_RELAX_SNODE_H
namespace Eigen {
namespace internal {
/**
* \brief Identify the initial relaxed supernodes
*
 * This routine is applied to a symmetric elimination tree.
* It assumes that the matrix has been reordered according to the postorder of the etree
* \param n The number of columns
* \param et elimination tree
* \param relax_columns Maximum number of columns allowed in a relaxed snode
* \param descendants Number of descendants of each node in the etree
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
  // The etree may not be postordered, but it is heap ordered.
IndexVector post;
internal::treePostorder(StorageIndex(n), et, post); // Post order etree
IndexVector inv_post(n+1);
for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
// Renumber etree in postorder
IndexVector iwork(n);
IndexVector et_save(n+1);
for (Index i = 0; i < n; ++i)
{
iwork(post(i)) = post(et(i));
}
et_save = et; // Save the original etree
et = iwork;
// compute the number of descendants of each node in the etree
relax_end.setConstant(emptyIdxLU);
Index j, parent;
descendants.setZero();
for (j = 0; j < n; j++)
{
parent = et(j);
if (parent != n) // not the dummy root
descendants(parent) += descendants(j) + 1;
}
// Identify the relaxed supernodes by postorder traversal of the etree
Index snode_start; // beginning of a snode
StorageIndex k;
Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
Index nsuper_et = 0; // Number of relaxed snodes in the original etree
StorageIndex l;
for (j = 0; j < n; )
{
parent = et(j);
snode_start = j;
while ( parent != n && descendants(parent) < relax_columns )
{
j = parent;
parent = et(j);
}
// Found a supernode in postordered etree, j is the last column
++nsuper_et_post;
k = StorageIndex(n);
for (Index i = snode_start; i <= j; ++i)
k = (std::min)(k, inv_post(i));
l = inv_post(j);
if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode
{
// This is also a supernode in the original etree
relax_end(k) = l; // Record last column
++nsuper_et;
}
else
{
for (Index i = snode_start; i <= j; ++i)
{
l = inv_post(i);
if (descendants(i) == 0)
{
relax_end(l) = l;
++nsuper_et;
}
}
}
j++;
// Search for a new leaf
    while (j < n && descendants(j) != 0) j++;
} // End postorder traversal of the etree
// Recover the original etree
et = et_save;
}
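// [Illustrative sketch -- not part of the original source.] The descendant
// count used above, in isolation: for an elimination tree given as a parent
// array `et` (with n acting as the dummy root), each node adds its subtree
// size to its parent. Visiting j = 0..n-1 works here because the etree has
// just been renumbered in postorder, so every child precedes its parent.
// Hypothetical names, kept inactive:
#if 0
#include <vector>
std::vector<int> count_descendants(const std::vector<int>& et, int n)
{
  std::vector<int> descendants(n, 0);
  for (int j = 0; j < n; ++j)
    if (et[j] != n)                        // skip the dummy root
      descendants[et[j]] += descendants[j] + 1;
  return descendants;
}
#endif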
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_HEAP_RELAX_SNODE_H
| 4,179 | 31.913386 | 166 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef SPARSELU_KERNEL_BMOD_H
#define SPARSELU_KERNEL_BMOD_H
namespace Eigen {
namespace internal {
template <int SegSizeAtCompileTime> struct LU_kernel_bmod
{
/** \internal
* \brief Performs numeric block updates from a given supernode to a single column
*
* \param segsize Size of the segment (and blocks ) to use for updates
* \param[in,out] dense Packed values of the original matrix
* \param tempv temporary vector to use for updates
* \param lusup array containing the supernodes
* \param lda Leading dimension in the supernode
* \param nrow Number of rows in the rectangular part of the supernode
* \param lsub compressed row subscripts of supernodes
* \param lptr pointer to the first column of the current supernode in lsub
* \param no_zeros Number of nonzeros elements before the diagonal part of the supernode
*/
template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
static EIGEN_DONT_INLINE void run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
};
template <int SegSizeAtCompileTime>
template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
{
typedef typename ScalarVector::Scalar Scalar;
// First, copy U[*,j] segment from dense(*) to tempv(*)
// The result of triangular solve is in tempv[*];
  // The result of the matrix-vector update is in dense[*]
Index isub = lptr + no_zeros;
Index i;
Index irow;
for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
{
irow = lsub(isub);
tempv(i) = dense(irow);
++isub;
}
// Dense triangular solve -- start effective triangle
luptr += lda * no_zeros + no_zeros;
// Form Eigen matrix and vector
Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);
u = A.template triangularView<UnitLower>().solve(u);
// Dense matrix-vector product y <-- B*x
luptr += segsize;
const Index PacketSize = internal::packet_traits<Scalar>::size;
Index ldl = internal::first_multiple(nrow, PacketSize);
Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
Index aligned_offset = internal::first_default_aligned(tempv.data()+segsize, PacketSize);
Index aligned_with_B_offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize))%PacketSize;
Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
l.setZero();
internal::sparselu_gemm<Scalar>(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride());
  // Scatter tempv[] into SPA dense[] as temporary storage
isub = lptr + no_zeros;
for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
{
irow = lsub(isub++);
dense(irow) = tempv(i);
}
// Scatter l into SPA dense[]
for (i = 0; i < nrow; i++)
{
irow = lsub(isub++);
dense(irow) -= l(i);
}
}
template <> struct LU_kernel_bmod<1>
{
template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
static EIGEN_DONT_INLINE void run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
};
template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
{
typedef typename ScalarVector::Scalar Scalar;
typedef typename IndexVector::Scalar StorageIndex;
Scalar f = dense(lsub(lptr + no_zeros));
luptr += lda * no_zeros + no_zeros + 1;
const Scalar* a(lusup.data() + luptr);
const StorageIndex* irow(lsub.data()+lptr + no_zeros + 1);
Index i = 0;
for (; i+1 < nrow; i+=2)
{
Index i0 = *(irow++);
Index i1 = *(irow++);
Scalar a0 = *(a++);
Scalar a1 = *(a++);
Scalar d0 = dense.coeff(i0);
Scalar d1 = dense.coeff(i1);
d0 -= f*a0;
d1 -= f*a1;
dense.coeffRef(i0) = d0;
dense.coeffRef(i1) = d1;
}
if(i<nrow)
dense.coeffRef(*(irow++)) -= f * *(a++);
}
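// [Illustrative sketch -- not part of the original source.] The dense
// analogue of the two numeric steps performed by LU_kernel_bmod::run:
// a unit-lower triangular solve on the segment, followed by a rectangular
// matrix-vector update whose result is later scattered and subtracted.
// Hypothetical helper, kept inactive:
#if 0
#include <Eigen/Dense>
template<typename Scalar>
void kernel_bmod_dense(const Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>& A, // segsize x segsize, unit lower
                       const Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>& B, // nrow x segsize
                       Eigen::Matrix<Scalar,Eigen::Dynamic,1>& u,                    // the U segment
                       Eigen::Matrix<Scalar,Eigen::Dynamic,1>& l)                    // the L update
{
  u = A.template triangularView<Eigen::UnitLower>().solve(u); // u <- A^{-1} u
  l.noalias() = B * u;                                        // l <- B u
}
#endif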
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_KERNEL_BMOD_H
| 5,721 | 42.679389 | 184 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_panel_bmod.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_PANEL_BMOD_H
#define SPARSELU_PANEL_BMOD_H
namespace Eigen {
namespace internal {
/**
* \brief Performs numeric block updates (sup-panel) in topological order.
*
* Before entering this routine, the original nonzeros in the panel
 * were already copied into the spa[m,w]
*
* \param m number of rows in the matrix
* \param w Panel size
* \param jcol Starting column of the panel
* \param nseg Number of segments in the U part
* \param dense Store the full representation of the panel
* \param tempv working array
* \param segrep segment representative... first row in the segment
* \param repfnz First nonzero rows
* \param glu Global LU data.
*
*
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::panel_bmod(const Index m, const Index w, const Index jcol,
const Index nseg, ScalarVector& dense, ScalarVector& tempv,
IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
{
Index ksub,jj,nextl_col;
Index fsupc, nsupc, nsupr, nrow;
Index krep, kfnz;
Index lptr; // points to the row subscripts of a supernode
Index luptr; // ...
Index segsize,no_zeros ;
// For each nonz supernode segment of U[*,j] in topological order
Index k = nseg - 1;
const Index PacketSize = internal::packet_traits<Scalar>::size;
for (ksub = 0; ksub < nseg; ksub++)
{ // For each updating supernode
/* krep = representative of current k-th supernode
* fsupc = first supernodal column
* nsupc = number of columns in a supernode
* nsupr = number of rows in a supernode
*/
krep = segrep(k); k--;
fsupc = glu.xsup(glu.supno(krep));
nsupc = krep - fsupc + 1;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
nrow = nsupr - nsupc;
lptr = glu.xlsub(fsupc);
// loop over the panel columns to detect the actual number of columns and rows
Index u_rows = 0;
Index u_cols = 0;
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
kfnz = repfnz_col(krep);
if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
u_cols++;
u_rows = (std::max)(segsize,u_rows);
}
if(nsupc >= 2)
{
Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
// gather U
Index u_col = 0;
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
luptr = glu.xlusup(fsupc);
no_zeros = kfnz - fsupc;
Index isub = lptr + no_zeros;
Index off = u_rows-segsize;
for (Index i = 0; i < off; i++) U(i,u_col) = 0;
for (Index i = 0; i < segsize; i++)
{
Index irow = glu.lsub(isub);
U(i+off,u_col) = dense_col(irow);
++isub;
}
u_col++;
}
// solve U = A^-1 U
luptr = glu.xlusup(fsupc);
Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
no_zeros = (krep - u_rows + 1) - fsupc;
luptr += lda * no_zeros + no_zeros;
MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
U = A.template triangularView<UnitLower>().solve(U);
// update
luptr += u_rows;
MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
Index offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize)) % PacketSize;
MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
L.setZero();
internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
// scatter U and L
u_col = 0;
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
no_zeros = kfnz - fsupc;
Index isub = lptr + no_zeros;
Index off = u_rows-segsize;
for (Index i = 0; i < segsize; i++)
{
Index irow = glu.lsub(isub++);
dense_col(irow) = U.coeff(i+off,u_col);
U.coeffRef(i+off,u_col) = 0;
}
// Scatter l into SPA dense[]
for (Index i = 0; i < nrow; i++)
{
Index irow = glu.lsub(isub++);
dense_col(irow) -= L.coeff(i,u_col);
L.coeffRef(i,u_col) = 0;
}
u_col++;
}
}
else // level 2 only
{
// Sequence through each column in the panel
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
luptr = glu.xlusup(fsupc);
Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
        // Perform a triangular solve and block update,
// then scatter the result of sup-col update to dense[]
no_zeros = kfnz - fsupc;
if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
else if(segsize==2) LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
else if(segsize==3) LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
else LU_kernel_bmod<Dynamic>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
} // End for each column in the panel
}
} // End for each updating supernode
} // end panel bmod
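// [Illustrative sketch -- not part of the original source.] The padded
// leading dimensions ldu and ldl used above, in isolation: rounding a row
// count up to the next multiple of the packet size keeps every column of
// the work matrices U and L packet-aligned, as required by the gemm kernel.
// Hypothetical helper, kept inactive:
#if 0
inline Index first_multiple_sketch(Index size, Index base)
{
  return ((size + base - 1) / base) * base; // smallest multiple of base >= size
}
// e.g. with PacketSize == 4: first_multiple_sketch(10, 4) == 12, so a
// u_rows x u_cols panel is stored with ldu == 12 and two padding rows.
#endif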
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PANEL_BMOD_H
| 8,484 | 36.879464 | 149 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_panel_dfs.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU
* -- SuperLU routine (version 2.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November 15, 1997
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_PANEL_DFS_H
#define SPARSELU_PANEL_DFS_H
namespace Eigen {
namespace internal {
template<typename IndexVector>
struct panel_dfs_traits
{
typedef typename IndexVector::Scalar StorageIndex;
panel_dfs_traits(Index jcol, StorageIndex* marker)
: m_jcol(jcol), m_marker(marker)
{}
bool update_segrep(Index krep, StorageIndex jj)
{
if(m_marker[krep]<m_jcol)
{
m_marker[krep] = jj;
return true;
}
return false;
}
void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {}
enum { ExpandMem = false };
Index m_jcol;
StorageIndex* m_marker;
};
template <typename Scalar, typename StorageIndex>
template <typename Traits>
void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu,
Index& nextl_col, Index krow, Traits& traits
)
{
StorageIndex kmark = marker(krow);
// For each unmarked krow of jj
marker(krow) = jj;
StorageIndex kperm = perm_r(krow);
if (kperm == emptyIdxLU ) {
// krow is in L : place it in structure of L(*, jj)
panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A
traits.mem_expand(panel_lsub, nextl_col, kmark);
}
else
{
// krow is in U : if its supernode-representative krep
// has been explored, update repfnz(*)
// krep = supernode representative of the current row
StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1;
// First nonzero element in the current column:
StorageIndex myfnz = repfnz_col(krep);
if (myfnz != emptyIdxLU )
{
// Representative visited before
if (myfnz > kperm ) repfnz_col(krep) = kperm;
}
else
{
// Otherwise, perform dfs starting at krep
StorageIndex oldrep = emptyIdxLU;
parent(krep) = oldrep;
repfnz_col(krep) = kperm;
StorageIndex xdfs = glu.xlsub(krep);
Index maxdfs = xprune(krep);
StorageIndex kpar;
do
{
// For each unmarked kchild of krep
while (xdfs < maxdfs)
{
StorageIndex kchild = glu.lsub(xdfs);
xdfs++;
StorageIndex chmark = marker(kchild);
if (chmark != jj )
{
marker(kchild) = jj;
StorageIndex chperm = perm_r(kchild);
if (chperm == emptyIdxLU)
{
// case kchild is in L: place it in L(*, j)
panel_lsub(nextl_col++) = kchild;
traits.mem_expand(panel_lsub, nextl_col, chmark);
}
else
{
// case kchild is in U :
// chrep = its supernode-rep. If its rep has been explored,
// update its repfnz(*)
StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1;
myfnz = repfnz_col(chrep);
if (myfnz != emptyIdxLU)
{ // Visited before
if (myfnz > chperm)
repfnz_col(chrep) = chperm;
}
else
{ // Cont. dfs at snode-rep of kchild
xplore(krep) = xdfs;
oldrep = krep;
krep = chrep; // Go deeper down G(L)
parent(krep) = oldrep;
repfnz_col(krep) = chperm;
xdfs = glu.xlsub(krep);
maxdfs = xprune(krep);
} // end if myfnz != -1
} // end if chperm == -1
} // end if chmark !=jj
} // end while xdfs < maxdfs
// krow has no more unexplored nbrs :
// Place snode-rep krep in postorder DFS, if this
// segment is seen for the first time. (Note that
// "repfnz(krep)" may change later.)
          // Backtrack dfs to its parent
if(traits.update_segrep(krep,jj))
//if (marker1(krep) < jcol )
{
segrep(nseg) = krep;
++nseg;
//marker1(krep) = jj;
}
kpar = parent(krep); // Pop recursion, mimic recursion
if (kpar == emptyIdxLU)
break; // dfs done
krep = kpar;
xdfs = xplore(krep);
maxdfs = xprune(krep);
} while (kpar != emptyIdxLU); // Do until empty stack
} // end if (myfnz = -1)
} // end if (kperm == -1)
}
/**
* \brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w)
*
* A supernode representative is the last column of a supernode.
* The nonzeros in U[*,j] are segments that end at supernodes representatives
*
* The routine returns a list of the supernodal representatives
* in topological order of the dfs that generates them. This list is
* a superset of the topological order of each individual column within
* the panel.
* The location of the first nonzero in each supernodal segment
* (supernodal entry location) is also returned. Each column has
* a separate list for this purpose.
*
 * Two marker arrays are used for the dfs:
* marker[i] == jj, if i was visited during dfs of current column jj;
* marker1[i] >= jcol, if i was visited by earlier columns in this panel;
*
* \param[in] m number of rows in the matrix
* \param[in] w Panel size
* \param[in] jcol Starting column of the panel
* \param[in] A Input matrix in column-major storage
* \param[in] perm_r Row permutation
* \param[out] nseg Number of U segments
* \param[out] dense Accumulate the column vectors of the panel
* \param[out] panel_lsub Subscripts of the row in the panel
* \param[out] segrep Segment representative i.e first nonzero row of each segment
* \param[out] repfnz First nonzero location in each row
* \param[out] xprune The pruned elimination tree
* \param[out] marker work vector
* \param parent The elimination tree
* \param xplore work vector
* \param glu The global data structure
*
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
Index nextl_col; // Next available position in panel_lsub[*,jj]
// Initialize pointers
VectorBlock<IndexVector> marker1(marker, m, m);
nseg = 0;
panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
// For each column in the panel
for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++)
{
nextl_col = (jj - jcol) * m;
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
// For each nnz in A[*, jj] do depth first search
for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
{
Index krow = it.row();
dense_col(krow) = it.value();
StorageIndex kmark = marker(krow);
if (kmark == jj)
continue; // krow visited before, go to the next nonzero
dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
xplore, glu, nextl_col, krow, traits);
}// end for nonzeros in column jj
} // end for column jj
}
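// [Illustrative sketch -- not part of the original source.] The marker trick
// used throughout panel_dfs, reduced to a generic graph traversal: a single
// `marker` array shared by all columns avoids clearing visited flags between
// DFS runs, since a node counts as visited for column jj iff marker[node] == jj.
// Hypothetical names, kept inactive:
#if 0
#include <vector>
void dfs_with_marker(int jj, int start,
                     const std::vector<std::vector<int> >& adj,
                     std::vector<int>& marker)
{
  std::vector<int> stack(1, start);
  marker[start] = jj;
  while (!stack.empty())
  {
    int u = stack.back(); stack.pop_back();
    for (std::size_t k = 0; k < adj[u].size(); ++k)
    {
      int v = adj[u][k];
      if (marker[v] != jj) { marker[v] = jj; stack.push_back(v); }
    }
  }
}
#endif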
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PANEL_DFS_H
| 9,026 | 33.853282 | 335 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_pivotL.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of xpivotL.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_PIVOTL_H
#define SPARSELU_PIVOTL_H
namespace Eigen {
namespace internal {
/**
 * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation.
*
* Pivot policy :
* (1) Compute thresh = u * max_(i>=j) abs(A_ij);
* (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
* pivot row = k;
* ELSE IF abs(A_jj) >= thresh THEN
* pivot row = j;
* ELSE
* pivot row = m;
*
* Note: If you absolutely want to use a given pivot order, then set u=0.0.
*
* \param jcol The current column of L
* \param diagpivotthresh diagonal pivoting threshold
* \param[in,out] perm_r Row permutation (threshold pivoting)
 * \param[in] iperm_c column permutation - used to find the diagonal of Pc*A*Pc'
* \param[out] pivrow The pivot row
* \param glu Global LU data
* \return 0 if success, i > 0 if U(i,i) is exactly zero
*
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
{
Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0
Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion
Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode
Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension
Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode
Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode
StorageIndex* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode
// Determine the largest abs numerical value for partial pivoting
Index diagind = iperm_c(jcol); // diagonal index
RealScalar pivmax(-1.0);
Index pivptr = nsupc;
Index diag = emptyIdxLU;
RealScalar rtemp;
Index isub, icol, itemp, k;
for (isub = nsupc; isub < nsupr; ++isub) {
using std::abs;
rtemp = abs(lu_col_ptr[isub]);
if (rtemp > pivmax) {
pivmax = rtemp;
pivptr = isub;
}
if (lsub_ptr[isub] == diagind) diag = isub;
}
// Test for singularity
if ( pivmax <= RealScalar(0.0) ) {
// if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero
pivrow = pivmax < RealScalar(0.0) ? diagind : lsub_ptr[pivptr];
perm_r(pivrow) = StorageIndex(jcol);
return (jcol+1);
}
RealScalar thresh = diagpivotthresh * pivmax;
// Choose appropriate pivotal element
{
// Test if the diagonal element can be used as a pivot (given the threshold value)
if (diag >= 0 )
{
// Diagonal element exists
using std::abs;
rtemp = abs(lu_col_ptr[diag]);
if (rtemp != RealScalar(0.0) && rtemp >= thresh) pivptr = diag;
}
pivrow = lsub_ptr[pivptr];
}
// Record pivot row
perm_r(pivrow) = StorageIndex(jcol);
// Interchange row subscripts
if (pivptr != nsupc )
{
std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] );
// Interchange numerical values as well, for the two rows in the whole snode
// such that L is indexed the same way as A
for (icol = 0; icol <= nsupc; icol++)
{
itemp = pivptr + icol * lda;
std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]);
}
}
// cdiv operations
Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc];
for (k = nsupc+1; k < nsupr; k++)
lu_col_ptr[k] *= temp;
return 0;
}
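// [Illustrative sketch -- not part of the original source.] The pivot policy
// documented above, applied to a plain dense column: take the diagonal entry
// whenever its magnitude is within a factor u of the largest entry, otherwise
// fall back to the largest entry. u == 1.0 gives classical partial pivoting,
// u == 0.0 always keeps the diagonal (a user-imposed pivot order).
// Hypothetical helper, kept inactive:
#if 0
#include <cmath>
int choose_pivot(const double* col, int n, int diag, double u)
{
  int pivptr = 0;
  double pivmax = -1.0;
  for (int i = 0; i < n; ++i)
    if (std::abs(col[i]) > pivmax) { pivmax = std::abs(col[i]); pivptr = i; }
  if (std::abs(col[diag]) != 0.0 && std::abs(col[diag]) >= u * pivmax)
    return diag;                  // the diagonal passes the threshold test
  return pivptr;                  // otherwise pick the largest entry
}
#endif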
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PIVOTL_H
| 4,977 | 35.072464 | 175 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_pruneL.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU
* -- SuperLU routine (version 2.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November 15, 1997
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_PRUNEL_H
#define SPARSELU_PRUNEL_H
namespace Eigen {
namespace internal {
/**
* \brief Prunes the L-structure.
*
* It prunes the L-structure of supernodes whose L-structure contains the current pivot row "pivrow"
*
*
* \param jcol The current column of L
* \param[in] perm_r Row permutation
* \param[out] pivrow The pivot row
* \param nseg Number of segments
* \param segrep
* \param repfnz
* \param[out] xprune
* \param glu Global LU data
*
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg,
const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
{
  // For each supernode-rep irep in U(*,j)
Index jsupno = glu.supno(jcol);
Index i,irep,irep1;
bool movnum, do_prune = false;
Index kmin = 0, kmax = 0, minloc, maxloc,krow;
for (i = 0; i < nseg; i++)
{
irep = segrep(i);
irep1 = irep + 1;
do_prune = false;
// Don't prune with a zero U-segment
if (repfnz(irep) == emptyIdxLU) continue;
// If a snode overlaps with the next panel, then the U-segment
// is fragmented into two parts -- irep and irep1. We should let
    // pruning occur at the rep-column in irep1's snode.
if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune
// If it has not been pruned & it has a nonz in row L(pivrow,i)
if (glu.supno(irep) != jsupno )
{
if ( xprune (irep) >= glu.xlsub(irep1) )
{
kmin = glu.xlsub(irep);
kmax = glu.xlsub(irep1) - 1;
for (krow = kmin; krow <= kmax; krow++)
{
if (glu.lsub(krow) == pivrow)
{
do_prune = true;
break;
}
}
}
if (do_prune)
{
// do a quicksort-type partition
// movnum=true means that the num values have to be exchanged
movnum = false;
if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1
movnum = true;
while (kmin <= kmax)
{
if (perm_r(glu.lsub(kmax)) == emptyIdxLU)
kmax--;
else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)
kmin++;
else
{
// kmin below pivrow (not yet pivoted), and kmax
            // above pivrow: interchange the two subscripts
std::swap(glu.lsub(kmin), glu.lsub(kmax));
// If the supernode has only one column, then we
// only keep one set of subscripts. For any subscript
            // interchange performed, a similar interchange must be
// done on the numerical values.
if (movnum)
{
minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) );
maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) );
std::swap(glu.lusup(minloc), glu.lusup(maxloc));
}
kmin++;
kmax--;
}
} // end while
xprune(irep) = StorageIndex(kmin); //Pruning
} // end if do_prune
} // end pruning
} // End for each U-segment
}
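// [Illustrative sketch -- not part of the original source.] The
// quicksort-style partition used above, in isolation: move the rows that
// have already been pivoted (perm[row] != empty) to the front of the
// subscript range and the unpivoted ones to the back; kmin ends up at the
// split point, which becomes the new pruned boundary. Hypothetical names,
// kept inactive:
#if 0
#include <algorithm>
#include <vector>
int partition_pivoted(std::vector<int>& rows, const std::vector<int>& perm, int empty)
{
  int kmin = 0, kmax = int(rows.size()) - 1;
  while (kmin <= kmax)
  {
    if (perm[rows[kmax]] == empty)      kmax--;        // already in place
    else if (perm[rows[kmin]] != empty) kmin++;        // already in place
    else                                std::swap(rows[kmin++], rows[kmax--]);
  }
  return kmin; // first unpivoted position
}
#endif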
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PRUNEL_H
| 4,543 | 32.167883 | 136 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseLU/SparseLU_relax_snode.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/* This file is a modified version of heap_relax_snode.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_RELAX_SNODE_H
#define SPARSELU_RELAX_SNODE_H
namespace Eigen {
namespace internal {
/**
* \brief Identify the initial relaxed supernodes
*
* This routine is applied to a column elimination tree.
* It assumes that the matrix has been reordered according to the postorder of the etree
* \param n the number of columns
* \param et elimination tree
* \param relax_columns Maximum number of columns allowed in a relaxed snode
* \param descendants Number of descendants of each node in the etree
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
// compute the number of descendants of each node in the etree
Index parent;
relax_end.setConstant(emptyIdxLU);
descendants.setZero();
for (Index j = 0; j < n; j++)
{
parent = et(j);
if (parent != n) // not the dummy root
descendants(parent) += descendants(j) + 1;
}
// Identify the relaxed supernodes by postorder traversal of the etree
Index snode_start; // beginning of a snode
for (Index j = 0; j < n; )
{
parent = et(j);
snode_start = j;
while ( parent != n && descendants(parent) < relax_columns )
{
j = parent;
parent = et(j);
}
// Found a supernode in postordered etree, j is the last column
relax_end(snode_start) = StorageIndex(j); // Record last column
j++;
// Search for a new leaf
    while (j < n && descendants(j) != 0) j++;
} // End postorder traversal of the etree
}
} // end namespace internal
} // end namespace Eigen
#endif
| 2,887 | 33.380952 | 161 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseQR/SparseQR.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012-2013 Desire Nuentsa <[email protected]>
// Copyright (C) 2012-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_QR_H
#define EIGEN_SPARSE_QR_H
namespace Eigen {
template<typename MatrixType, typename OrderingType> class SparseQR;
template<typename SparseQRType> struct SparseQRMatrixQReturnType;
template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType;
template<typename SparseQRType, typename Derived> struct SparseQR_QProduct;
namespace internal {
template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >
{
typedef typename SparseQRType::MatrixType ReturnType;
typedef typename ReturnType::StorageIndex StorageIndex;
typedef typename ReturnType::StorageKind StorageKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic
};
};
template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >
{
typedef typename SparseQRType::MatrixType ReturnType;
};
template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> >
{
typedef typename Derived::PlainObject ReturnType;
};
} // End namespace internal
/**
* \ingroup SparseQR_Module
* \class SparseQR
* \brief Sparse left-looking rank-revealing QR factorization
*
* This class implements a left-looking rank-revealing QR decomposition
* of sparse matrices. When a column has a norm less than a given tolerance
* it is implicitly permuted to the end. The QR factorization thus obtained is
* given by A*P = Q*R where R is upper triangular or trapezoidal.
*
* P is the column permutation which is the product of the fill-reducing and the
* rank-revealing permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as products of Householder reflectors.
* Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
* You can then apply it to a vector.
*
  * R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
* matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
* \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
* OrderingMethods \endlink module for the list of built-in and external ordering methods.
*
* \implsparsesolverconcept
*
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
*
*/
template<typename _MatrixType, typename _OrderingType>
class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
{
protected:
typedef SparseSolverBase<SparseQR<_MatrixType,_OrderingType> > Base;
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef _OrderingType OrderingType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> QRMatrixType;
typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }
/** Construct a QR factorization of the matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa compute()
*/
explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{
compute(mat);
}
/** Computes the QR factorization of the sparse matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa analyzePattern(), factorize()
*/
void compute(const MatrixType& mat)
{
analyzePattern(mat);
factorize(mat);
}
void analyzePattern(const MatrixType& mat);
void factorize(const MatrixType& mat);
/** \returns the number of rows of the represented matrix.
*/
inline Index rows() const { return m_pmat.rows(); }
/** \returns the number of columns of the represented matrix.
*/
inline Index cols() const { return m_pmat.cols();}
/** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
* \warning The entries of the returned matrix are not sorted. This means that using it in algorithms
    * expecting sorted entries will fail. This includes random coefficient accesses (SparseMatrix::coeff()),
* and coefficient-wise operations. Matrix products and triangular solves are fine though.
*
* To sort the entries, you can assign it to a row-major matrix, and if a column-major matrix
* is required, you can copy it again:
* \code
* SparseMatrix<double> R = qr.matrixR(); // column-major, not sorted!
* SparseMatrix<double,RowMajor> Rr = qr.matrixR(); // row-major, sorted
* SparseMatrix<double> Rc = Rr; // column-major, sorted
* \endcode
*/
const QRMatrixType& matrixR() const { return m_R; }
/** \returns the number of non linearly dependent columns as determined by the pivoting threshold.
*
* \sa setPivotThreshold()
*/
Index rank() const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
return m_nonzeropivots;
}
/** \returns an expression of the matrix Q as products of sparse Householder reflectors.
* The common usage of this function is to apply it to a dense matrix or vector
* \code
* VectorXd B1, B2;
* // Initialize B1
* B2 = matrixQ() * B1;
* \endcode
*
* To get a plain SparseMatrix representation of Q:
* \code
* SparseMatrix<double> Q;
* Q = SparseQR<SparseMatrix<double> >(A).matrixQ();
* \endcode
* Internally, this call simply performs a sparse product between the matrix Q
* and a sparse identity matrix. However, due to the fact that the sparse
* reflectors are stored unsorted, two transpositions are needed to sort
* them before performing the product.
*/
SparseQRMatrixQReturnType<SparseQR> matrixQ() const
{ return SparseQRMatrixQReturnType<SparseQR>(*this); }
/** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R
* It is the combination of the fill-in reducing permutation and numerical column pivoting.
*/
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_outputPerm_c;
}
/** \returns A string describing the type of error.
* This method is provided to ease debugging, not to handle errors.
*/
std::string lastErrorMessage() const { return m_lastError; }
/** \internal */
template<typename Rhs, typename Dest>
bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
Index rank = this->rank();
// Compute Q^T * b;
typename Dest::PlainObject y, b;
y = this->matrixQ().transpose() * B;
b = y;
// Solve with the triangular matrix R
y.resize((std::max<Index>)(cols(),y.rows()),y.cols());
y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
y.bottomRows(y.rows()-rank).setZero();
// Apply the column permutation
if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols());
else dest = y.topRows(cols());
m_info = Success;
return true;
}
/** Sets the threshold that is used to determine linearly dependent columns during the factorization.
*
* In practice, if during the factorization the norm of the column that has to be eliminated is below
      * this threshold, then the entire column is treated as zero, and it is moved to the end.
*/
void setPivotThreshold(const RealScalar& threshold)
{
m_useDefaultThreshold = false;
m_threshold = threshold;
}
/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
return Solve<SparseQR, Rhs>(*this, B.derived());
}
template<typename Rhs>
inline const Solve<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
return Solve<SparseQR, Rhs>(*this, B.derived());
}
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the QR factorization reports a numerical problem
* \c InvalidInput if the input matrix is invalid
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
/** \internal */
inline void _sort_matrix_Q()
{
if(this->m_isQSorted) return;
// The matrix Q is sorted during the transposition
SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q);
this->m_Q = mQrm;
this->m_isQSorted = true;
}
protected:
bool m_analysisIsok;
bool m_factorizationIsok;
mutable ComputationInfo m_info;
std::string m_lastError;
QRMatrixType m_pmat; // Temporary matrix
QRMatrixType m_R; // The triangular factor matrix
QRMatrixType m_Q; // The orthogonal reflectors
ScalarVector m_hcoeffs; // The Householder coefficients
PermutationType m_perm_c; // Fill-reducing Column permutation
PermutationType m_pivotperm; // The permutation for rank revealing
PermutationType m_outputPerm_c; // The final column permutation
RealScalar m_threshold; // Threshold to determine null Householder reflections
bool m_useDefaultThreshold; // Use default threshold
Index m_nonzeropivots; // Number of non zero pivots found
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
bool m_isEtreeOk; // whether the elimination tree match the initial input matrix
template <typename, typename > friend struct SparseQR_QProduct;
};
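// [Illustrative sketch -- not part of the original source.] Hypothetical
// end-to-end usage of the class above: factorize a compressed column-major
// sparse matrix and solve a (least-squares) system. Kept inactive:
#if 0
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
int sparseqr_usage_sketch()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(4, 3);
  A.insert(0,0) = 2.0; A.insert(1,1) = 3.0;
  A.insert(2,2) = 4.0; A.insert(3,0) = 1.0;
  A.makeCompressed();                      // SparseQR requires compressed mode
  Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
  Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
  if (qr.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = qr.solve(b);         // minimizes ||A*x - b|| via A*P = Q*R
  return qr.info() == Eigen::Success ? 0 : 1;
}
#endif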
/** \brief Preprocessing step of a QR factorization
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* In this step, the fill-reducing permutation is computed and applied to the columns of A
* and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
*
* \note In this step it is assumed that there is no empty row in the matrix \a mat.
*/
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
{
eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
// Copy to a column major matrix if the input is rowmajor
typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
// Compute the column fill reducing ordering
OrderingType ord;
ord(matCpy, m_perm_c);
Index n = mat.cols();
Index m = mat.rows();
Index diagSize = (std::min)(m,n);
if (!m_perm_c.size())
{
m_perm_c.resize(n);
m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));
}
// Compute the column elimination tree of the permuted matrix
m_outputPerm_c = m_perm_c.inverse();
internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_isEtreeOk = true;
m_R.resize(m, n);
m_Q.resize(m, diagSize);
// Allocate space for nonzero elements : rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
m_hcoeffs.resize(diagSize);
m_analysisIsok = true;
}
/** \brief Performs the numerical QR factorization of the input matrix
*
* The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
* a matrix having the same sparsity pattern than \a mat.
*
* \param mat The sparse column-major matrix
*/
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
using std::abs;
eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = StorageIndex(mat.cols());
StorageIndex diagSize = (std::min)(m,n);
IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes
IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
ScalarVector tval(m); // The dense vector used to compute the current column
RealScalar pivotThreshold = m_threshold;
m_R.setZero();
m_Q.setZero();
m_pmat = mat;
if(!m_isEtreeOk)
{
m_outputPerm_c = m_perm_c.inverse();
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_isEtreeOk = true;
}
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
// Apply the fill-in reducing permutation lazily:
{
// If the input is row major, copy the original column indices,
// otherwise directly use the input matrix
//
IndexVector originalOuterIndicesCpy;
const StorageIndex *originalOuterIndices = mat.outerIndexPtr();
if(MatrixType::IsRowMajor)
{
originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
originalOuterIndices = originalOuterIndicesCpy.data();
}
for (int i = 0; i < n; i++)
{
Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
}
}
  /* Compute the default threshold as in MATLAB, see:
   * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
   * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
   */
if(m_useDefaultThreshold)
{
RealScalar max2Norm = 0.0;
for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
if(max2Norm==RealScalar(0))
max2Norm = RealScalar(1);
pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
}
// Initialize the numerical permutation
m_pivotperm.setIdentity(n);
StorageIndex nonzeroCol = 0; // Record the number of valid pivots
m_Q.startVec(0);
// Left looking rank-revealing QR factorization: compute a column of R and Q at a time
for (StorageIndex col = 0; col < n; ++col)
{
mark.setConstant(-1);
m_R.startVec(col);
mark(nonzeroCol) = col;
Qidx(0) = nonzeroCol;
nzcolR = 0; nzcolQ = 1;
bool found_diag = nonzeroCol>=m;
tval.setZero();
    // Symbolic factorization: find the nonzero locations of column k of the factors R and Q, i.e.,
    // all the nodes (with indices lower than rank) reachable through the column elimination tree (etree) rooted at node k.
    // Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
    // thus the trick with found_diag that permits one more iteration on the diagonal element if it has not been found yet.
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{
StorageIndex curIdx = nonzeroCol;
if(itp) curIdx = StorageIndex(itp.row());
if(curIdx == nonzeroCol) found_diag = true;
      // Get the nonzero indices of the current column of R
StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here
if (st < 0 )
{
m_lastError = "Empty row found during numerical factorization";
m_info = InvalidInput;
return;
}
// Traverse the etree
Index bi = nzcolR;
for (; mark(st) != col; st = m_etree(st))
{
Ridx(nzcolR) = st; // Add this row to the list,
mark(st) = col; // and mark this row as visited
nzcolR++;
}
// Reverse the list to get the topological ordering
Index nt = nzcolR-bi;
for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
      // Copy the current (curIdx,col) value of the input matrix
if(itp) tval(curIdx) = itp.value();
else tval(curIdx) = Scalar(0);
// Compute the pattern of Q(:,k)
if(curIdx > nonzeroCol && mark(curIdx) != col )
{
Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q,
mark(curIdx) = col; // and mark it as visited
nzcolQ++;
}
}
    // Browse all the indices of R(:,col) in reverse order
for (Index i = nzcolR-1; i >= 0; i--)
{
Index curIdx = Ridx(i);
// Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
Scalar tdot(0);
// First compute q' * tval
tdot = m_Q.col(curIdx).dot(tval);
tdot *= m_hcoeffs(curIdx);
// Then update tval = tval - q * tau
// FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
tval(itq.row()) -= itq.value() * tdot;
// Detect fill-in for the current column of Q
if(m_etree(Ridx(i)) == nonzeroCol)
{
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
{
StorageIndex iQ = StorageIndex(itq.row());
if (mark(iQ) != col)
{
Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q,
mark(iQ) = col; // and mark it as visited
}
}
}
} // End update current column
Scalar tau = RealScalar(0);
RealScalar beta = 0;
if(nonzeroCol < diagSize)
{
      // Compute the Householder reflection that eliminates the current column
// FIXME this step should call the Householder module.
Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
// First, the squared norm of Q((col+1):m, col)
RealScalar sqrNorm = 0.;
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
{
beta = numext::real(c0);
tval(Qidx(0)) = 1;
}
else
{
using std::sqrt;
beta = sqrt(numext::abs2(c0) + sqrNorm);
if(numext::real(c0) >= RealScalar(0))
beta = -beta;
tval(Qidx(0)) = 1;
for (Index itq = 1; itq < nzcolQ; ++itq)
tval(Qidx(itq)) /= (c0 - beta);
tau = numext::conj((beta-c0) / beta);
}
}
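    // Note on the block above: it is the classical Householder construction.
    // With c0 = tval(Qidx(0)) and s = sum of |tval(Qidx(k))|^2 for k >= 1 it sets
    //   beta = -sign(real(c0)) * sqrt(|c0|^2 + s),
    //   v(0) = 1,  v(k) = tval(Qidx(k)) / (c0 - beta)  for k >= 1,
    //   tau  = conj((beta - c0) / beta),
    // so that (I - tau*v*v^H) maps the current column onto beta*e1.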
// Insert values in R
for (Index i = nzcolR-1; i >= 0; i--)
{
Index curIdx = Ridx(i);
if(curIdx < nonzeroCol)
{
m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
tval(curIdx) = Scalar(0.);
}
}
if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)
{
m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
// The householder coefficient
m_hcoeffs(nonzeroCol) = tau;
// Record the householder reflections
for (Index itq = 0; itq < nzcolQ; ++itq)
{
Index iQ = Qidx(itq);
m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);
tval(iQ) = Scalar(0.);
}
nonzeroCol++;
if(nonzeroCol<diagSize)
m_Q.startVec(nonzeroCol);
}
else
{
      // Zero pivot found: implicitly move this column to the end
for (Index j = nonzeroCol; j < n-1; j++)
std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
// Recompute the column elimination tree
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
m_isEtreeOk = false;
}
}
m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
// Finalize the column pointers of the sparse matrices R and Q
m_Q.finalize();
m_Q.makeCompressed();
m_R.finalize();
m_R.makeCompressed();
m_isQSorted = false;
m_nonzeropivots = nonzeroCol;
if(nonzeroCol<n)
{
// Permute the triangular factor to put the 'dead' columns to the end
QRMatrixType tempR(m_R);
m_R = tempR * m_pivotperm;
// Update the column permutation
m_outputPerm_c = m_outputPerm_c * m_pivotperm;
}
m_isInitialized = true;
m_factorizationIsok = true;
m_info = Success;
}
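// Usage sketch: after a successful factorization, the numerical rank and the
// factors can be queried. A possible pattern, assuming a SparseQR object qr:
//
//   qr.setPivotThreshold(1e-8);            // optional, must precede compute()
//   qr.compute(A);
//   Index r = qr.rank();                   // number of nonzero pivots found
//   SparseMatrix<double> R = qr.matrixR();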
template <typename SparseQRType, typename Derived>
struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> >
{
typedef typename SparseQRType::QRMatrixType MatrixType;
typedef typename SparseQRType::Scalar Scalar;
// Get the references
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
m_qr(qr),m_other(other),m_transpose(transpose) {}
inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
inline Index cols() const { return m_other.cols(); }
// Assign to a vector
template<typename DesType>
void evalTo(DesType& res) const
{
Index m = m_qr.rows();
Index n = m_qr.cols();
Index diagSize = (std::min)(m,n);
res = m_other;
if (m_transpose)
{
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
//Compute res = Q' * other column by column
for(Index j = 0; j < res.cols(); j++){
for (Index k = 0; k < diagSize; k++)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
tau = tau * m_qr.m_hcoeffs(k);
res.col(j) -= tau * m_qr.m_Q.col(k);
}
}
}
else
{
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
// Compute res = Q * other column by column
for(Index j = 0; j < res.cols(); j++)
{
for (Index k = diagSize-1; k >=0; k--)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
tau = tau * m_qr.m_hcoeffs(k);
res.col(j) -= tau * m_qr.m_Q.col(k);
}
}
}
}
const SparseQRType& m_qr;
const Derived& m_other;
bool m_transpose;
};
template<typename SparseQRType>
struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
{
typedef typename SparseQRType::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic
};
explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}
template<typename Derived>
SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other)
{
return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
}
SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
// To use for operations with the transpose of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
const SparseQRType& m_qr;
};
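// Usage sketch: Q is never formed explicitly; the expression returned by
// matrixQ() applies the stored Householder reflections on the fly. Assuming
// a factorized SparseQR object qr and a dense VectorXd b:
//
//   VectorXd Qtb = qr.matrixQ().transpose() * b;  // apply Q^T without forming Q
//   SparseMatrix<double> Q;
//   Q = qr.matrixQ();                             // explicit (and expensive) Q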
template<typename SparseQRType>
struct SparseQRMatrixQTransposeReturnType
{
explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {}
template<typename Derived>
SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other)
{
return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true);
}
const SparseQRType& m_qr;
};
namespace internal {
template<typename SparseQRType>
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
{
typedef typename SparseQRType::MatrixType MatrixType;
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SparseShape Shape;
};
template< typename DstXprType, typename SparseQRType>
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse>
{
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
typedef typename DstXprType::Scalar Scalar;
typedef typename DstXprType::StorageIndex StorageIndex;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
{
typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
idMat.setIdentity();
// Sort the sparse householder reflectors if needed
const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false);
}
};
template< typename DstXprType, typename SparseQRType>
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense>
{
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
typedef typename DstXprType::Scalar Scalar;
typedef typename DstXprType::StorageIndex StorageIndex;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
{
dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
}
};
} // end namespace internal
} // end namespace Eigen
#endif
| 28,084 | 36.952703 | 163 |
h
|
abess
|
abess-master/python/include/Eigen/src/StlSupport/StdDeque.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STDDEQUE_H
#define EIGEN_STDDEQUE_H
#include "details.h"
/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::deque such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \
namespace std \
{ \
template<> \
class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef deque_base::allocator_type allocator_type; \
typedef deque_base::size_type size_type; \
typedef deque_base::iterator iterator; \
explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \
template<typename InputIterator> \
deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
deque(iterator start, iterator end) : deque_base(start, end) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
} \
}; \
}
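// Usage sketch: the macro must be invoked at global scope, before the first
// use of the container with the given type. For instance, for a fixed-size
// vectorizable type:
//
//   EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4f)
//   std::deque<Eigen::Vector4f> q;  // now uses the aligned allocator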
// check whether we really need the std::deque specialization
#if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */
namespace std {
#define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \
public: \
typedef T value_type; \
typedef typename deque_base::allocator_type allocator_type; \
typedef typename deque_base::size_type size_type; \
typedef typename deque_base::iterator iterator; \
typedef typename deque_base::const_iterator const_iterator; \
explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \
template<typename InputIterator> \
deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
: deque_base(first, last, a) {} \
deque(const deque& c) : deque_base(c) {} \
explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
deque(iterator start, iterator end) : deque_base(start, end) {} \
deque& operator=(const deque& x) { \
deque_base::operator=(x); \
return *this; \
}
template<typename T>
class deque<T,EIGEN_ALIGNED_ALLOCATOR<T> >
: public deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
{
typedef deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > deque_base;
EIGEN_STD_DEQUE_SPECIALIZATION_BODY
void resize(size_type new_size)
{ resize(new_size, T()); }
#if defined(_DEQUE_)
// workaround MSVC std::deque implementation
void resize(size_type new_size, const value_type& x)
{
if (deque_base::size() < new_size)
deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x);
else if (new_size < deque_base::size())
deque_base::erase(deque_base::begin() + new_size, deque_base::end());
}
void push_back(const value_type& x)
{ deque_base::push_back(x); }
void push_front(const value_type& x)
{ deque_base::push_front(x); }
using deque_base::insert;
iterator insert(const_iterator position, const value_type& x)
{ return deque_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ deque_base::insert(position, new_size, x); }
#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)
// workaround GCC std::deque implementation
void resize(size_type new_size, const value_type& x)
{
if (new_size < deque_base::size())
deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
else
deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
}
#else
// either GCC 4.1 or non-GCC
// default implementation which should always work.
void resize(size_type new_size, const value_type& x)
{
if (new_size < deque_base::size())
deque_base::erase(deque_base::begin() + new_size, deque_base::end());
else if (new_size > deque_base::size())
deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
}
#endif
};
}
#endif // check whether specialization is actually required
#endif // EIGEN_STDDEQUE_H
| 5,117 | 39.299213 | 180 |
h
|
abess
|
abess-master/python/include/Eigen/src/StlSupport/StdList.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STDLIST_H
#define EIGEN_STDLIST_H
#include "details.h"
/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::list such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \
namespace std \
{ \
template<> \
class list<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef list_base::allocator_type allocator_type; \
typedef list_base::size_type size_type; \
typedef list_base::iterator iterator; \
explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \
template<typename InputIterator> \
list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
list(iterator start, iterator end) : list_base(start, end) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
} \
}; \
}
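// Usage sketch, analogous to the std::deque case: invoke the macro once at
// global scope before using the container with an alignment-sensitive type:
//
//   EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Eigen::Matrix2d)
//   std::list<Eigen::Matrix2d> l;  // now uses the aligned allocator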
// check whether we really need the std::list specialization
#if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). */
namespace std
{
#define EIGEN_STD_LIST_SPECIALIZATION_BODY \
public: \
typedef T value_type; \
typedef typename list_base::allocator_type allocator_type; \
typedef typename list_base::size_type size_type; \
typedef typename list_base::iterator iterator; \
typedef typename list_base::const_iterator const_iterator; \
explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \
template<typename InputIterator> \
list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
: list_base(first, last, a) {} \
list(const list& c) : list_base(c) {} \
explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
list(iterator start, iterator end) : list_base(start, end) {} \
list& operator=(const list& x) { \
list_base::operator=(x); \
return *this; \
}
template<typename T>
class list<T,EIGEN_ALIGNED_ALLOCATOR<T> >
: public list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
{
typedef list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > list_base;
EIGEN_STD_LIST_SPECIALIZATION_BODY
void resize(size_type new_size)
{ resize(new_size, T()); }
void resize(size_type new_size, const value_type& x)
{
if (list_base::size() < new_size)
list_base::insert(list_base::end(), new_size - list_base::size(), x);
else
while (new_size < list_base::size()) list_base::pop_back();
}
#if defined(_LIST_)
// workaround MSVC std::list implementation
void push_back(const value_type& x)
{ list_base::push_back(x); }
using list_base::insert;
iterator insert(const_iterator position, const value_type& x)
{ return list_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ list_base::insert(position, new_size, x); }
#endif
};
}
#endif // check whether specialization is actually required
#endif // EIGEN_STDLIST_H
| 4,147 | 37.766355 | 178 |
h
|
abess
|
abess-master/python/include/Eigen/src/StlSupport/StdVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STDVECTOR_H
#define EIGEN_STDVECTOR_H
#include "details.h"
/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::vector such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
namespace std \
{ \
template<> \
class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef vector_base::allocator_type allocator_type; \
typedef vector_base::size_type size_type; \
typedef vector_base::iterator iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
vector(iterator start, iterator end) : vector_base(start, end) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
} \
}; \
}
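// Usage sketch: as for the other containers, the macro is invoked once at
// global scope; afterwards the plain std::allocator form is safe to use:
//
//   EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Matrix4f)
//   std::vector<Eigen::Matrix4f> v(10);  // now uses the aligned allocator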
// Don't specialize if containers are implemented according to C++11
#if !EIGEN_HAS_CXX11_CONTAINERS
namespace std {
#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \
public: \
typedef T value_type; \
typedef typename vector_base::allocator_type allocator_type; \
typedef typename vector_base::size_type size_type; \
typedef typename vector_base::iterator iterator; \
typedef typename vector_base::const_iterator const_iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
: vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
vector(iterator start, iterator end) : vector_base(start, end) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
}
template<typename T>
class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >
: public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
{
typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;
EIGEN_STD_VECTOR_SPECIALIZATION_BODY
void resize(size_type new_size)
{ resize(new_size, T()); }
#if defined(_VECTOR_)
// workaround MSVC std::vector implementation
void resize(size_type new_size, const value_type& x)
{
if (vector_base::size() < new_size)
vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x);
else if (new_size < vector_base::size())
vector_base::erase(vector_base::begin() + new_size, vector_base::end());
}
void push_back(const value_type& x)
{ vector_base::push_back(x); }
using vector_base::insert;
iterator insert(const_iterator position, const value_type& x)
{ return vector_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ vector_base::insert(position, new_size, x); }
#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))
/* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).
* However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */
void resize(size_type new_size, const value_type& x)
{
vector_base::resize(new_size,x);
}
#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
// workaround GCC std::vector implementation
void resize(size_type new_size, const value_type& x)
{
if (new_size < vector_base::size())
vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
else
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
#else
// either GCC 4.1 or non-GCC
// default implementation which should always work.
void resize(size_type new_size, const value_type& x)
{
if (new_size < vector_base::size())
vector_base::erase(vector_base::begin() + new_size, vector_base::end());
else if (new_size > vector_base::size())
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
#endif
};
}
#endif // !EIGEN_HAS_CXX11_CONTAINERS
#endif // EIGEN_STDVECTOR_H
| 5,330 | 39.386364 | 130 |
h
|
abess
|
abess-master/python/include/Eigen/src/StlSupport/details.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STL_DETAILS_H
#define EIGEN_STL_DETAILS_H
#ifndef EIGEN_ALIGNED_ALLOCATOR
#define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator
#endif
namespace Eigen {
// This one is needed to prevent reimplementing the whole std::vector.
template <class T>
class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>
{
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class U>
struct rebind
{
typedef aligned_allocator_indirection<U> other;
};
aligned_allocator_indirection() {}
aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}
aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}
template<class U>
aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}
template<class U>
aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}
~aligned_allocator_indirection() {}
};
#if EIGEN_COMP_MSVC
// sometimes, MSVC detects, at compile time, that the argument x
  // in std::vector::resize(size_t s,T x) won't be aligned and generates an error
  // even if this function is never called. Hence this little wrapper.
#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \
typename Eigen::internal::conditional< \
Eigen::internal::is_arithmetic<T>::value, \
T, \
Eigen::internal::workaround_msvc_stl_support<T> \
>::type
namespace internal {
template<typename T> struct workaround_msvc_stl_support : public T
{
inline workaround_msvc_stl_support() : T() {}
inline workaround_msvc_stl_support(const T& other) : T(other) {}
inline operator T& () { return *static_cast<T*>(this); }
inline operator const T& () const { return *static_cast<const T*>(this); }
template<typename OtherT>
inline T& operator=(const OtherT& other)
{ T::operator=(other); return *this; }
inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other)
{ T::operator=(other); return *this; }
};
}
#else
#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T
#endif
}
#endif // EIGEN_STL_DETAILS_H
| 2,809 | 32.058824 | 106 |
h
|
abess
|
abess-master/python/include/Eigen/src/SuperLUSupport/SuperLUSupport.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SUPERLUSUPPORT_H
#define EIGEN_SUPERLUSUPPORT_H
namespace Eigen {
#if defined(SUPERLU_MAJOR_VERSION) && (SUPERLU_MAJOR_VERSION >= 5)
#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \
extern "C" { \
extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
void *, int, SuperMatrix *, SuperMatrix *, \
FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \
GlobalLU_t *, mem_usage_t *, SuperLUStat_t *, int *); \
} \
inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
int *perm_c, int *perm_r, int *etree, char *equed, \
FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
SuperMatrix *U, void *work, int lwork, \
SuperMatrix *B, SuperMatrix *X, \
FLOATTYPE *recip_pivot_growth, \
FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
SuperLUStat_t *stats, int *info, KEYTYPE) { \
mem_usage_t mem_usage; \
GlobalLU_t gLU; \
PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
U, work, lwork, B, X, recip_pivot_growth, rcond, \
ferr, berr, &gLU, &mem_usage, stats, info); \
return mem_usage.for_lu; /* bytes used by the factor storage */ \
}
#else // version < 5.0
#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \
extern "C" { \
extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
void *, int, SuperMatrix *, SuperMatrix *, \
FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \
mem_usage_t *, SuperLUStat_t *, int *); \
} \
inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
int *perm_c, int *perm_r, int *etree, char *equed, \
FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
SuperMatrix *U, void *work, int lwork, \
SuperMatrix *B, SuperMatrix *X, \
FLOATTYPE *recip_pivot_growth, \
FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
SuperLUStat_t *stats, int *info, KEYTYPE) { \
mem_usage_t mem_usage; \
PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
U, work, lwork, B, X, recip_pivot_growth, rcond, \
ferr, berr, &mem_usage, stats, info); \
return mem_usage.for_lu; /* bytes used by the factor storage */ \
}
#endif
DECL_GSSVX(s,float,float)
DECL_GSSVX(c,float,std::complex<float>)
DECL_GSSVX(d,double,double)
DECL_GSSVX(z,double,std::complex<double>)
#ifdef MILU_ALPHA
#define EIGEN_SUPERLU_HAS_ILU
#endif
#ifdef EIGEN_SUPERLU_HAS_ILU
// similarly for the incomplete factorization using gsisx
#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \
extern "C" { \
extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \
mem_usage_t *, SuperLUStat_t *, int *); \
} \
inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \
int *perm_c, int *perm_r, int *etree, char *equed, \
FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
SuperMatrix *U, void *work, int lwork, \
SuperMatrix *B, SuperMatrix *X, \
FLOATTYPE *recip_pivot_growth, \
FLOATTYPE *rcond, \
SuperLUStat_t *stats, int *info, KEYTYPE) { \
mem_usage_t mem_usage; \
PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
U, work, lwork, B, X, recip_pivot_growth, rcond, \
&mem_usage, stats, info); \
return mem_usage.for_lu; /* bytes used by the factor storage */ \
}
DECL_GSISX(s,float,float)
DECL_GSISX(c,float,std::complex<float>)
DECL_GSISX(d,double,double)
DECL_GSISX(z,double,std::complex<double>)
#endif
template<typename MatrixType>
struct SluMatrixMapHelper;
/** \internal
*
* A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
 * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
 *
 * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
*/
struct SluMatrix : SuperMatrix
{
SluMatrix()
{
Store = &storage;
}
SluMatrix(const SluMatrix& other)
: SuperMatrix(other)
{
Store = &storage;
storage = other.storage;
}
SluMatrix& operator=(const SluMatrix& other)
{
SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
Store = &storage;
storage = other.storage;
return *this;
}
struct
{
union {int nnz;int lda;};
void *values;
int *innerInd;
int *outerInd;
} storage;
void setStorageType(Stype_t t)
{
Stype = t;
if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
Store = &storage;
else
{
eigen_assert(false && "storage type not supported");
Store = 0;
}
}
template<typename Scalar>
void setScalarType()
{
if (internal::is_same<Scalar,float>::value)
Dtype = SLU_S;
else if (internal::is_same<Scalar,double>::value)
Dtype = SLU_D;
else if (internal::is_same<Scalar,std::complex<float> >::value)
Dtype = SLU_C;
else if (internal::is_same<Scalar,std::complex<double> >::value)
Dtype = SLU_Z;
else
{
eigen_assert(false && "Scalar type not supported by SuperLU");
}
}
template<typename MatrixType>
static SluMatrix Map(MatrixBase<MatrixType>& _mat)
{
MatrixType& mat(_mat.derived());
eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU");
SluMatrix res;
res.setStorageType(SLU_DN);
res.setScalarType<typename MatrixType::Scalar>();
res.Mtype = SLU_GE;
res.nrow = internal::convert_index<int>(mat.rows());
res.ncol = internal::convert_index<int>(mat.cols());
res.storage.lda = internal::convert_index<int>(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride());
res.storage.values = (void*)(mat.data());
return res;
}
template<typename MatrixType>
static SluMatrix Map(SparseMatrixBase<MatrixType>& a_mat)
{
MatrixType &mat(a_mat.derived());
SluMatrix res;
if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
{
res.setStorageType(SLU_NR);
res.nrow = internal::convert_index<int>(mat.cols());
res.ncol = internal::convert_index<int>(mat.rows());
}
else
{
res.setStorageType(SLU_NC);
res.nrow = internal::convert_index<int>(mat.rows());
res.ncol = internal::convert_index<int>(mat.cols());
}
res.Mtype = SLU_GE;
res.storage.nnz = internal::convert_index<int>(mat.nonZeros());
res.storage.values = mat.valuePtr();
res.storage.innerInd = mat.innerIndexPtr();
res.storage.outerInd = mat.outerIndexPtr();
res.setScalarType<typename MatrixType::Scalar>();
// FIXME the following is not very accurate
if (MatrixType::Flags & Upper)
res.Mtype = SLU_TRU;
if (MatrixType::Flags & Lower)
res.Mtype = SLU_TRL;
eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
return res;
}
};
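// Usage sketch: both Map() overloads only reference the existing storage,
// no data is copied. Assuming a column-major SparseMatrix<double> A and a
// dense VectorXd b:
//
//   SluMatrix sluA = SluMatrix::Map(A);  // SLU_NC view of the sparse matrix
//   SluMatrix sluB = SluMatrix::Map(b);  // SLU_DN view of the dense vector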
template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
{
typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
static void run(MatrixType& mat, SluMatrix& res)
{
    eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
res.setStorageType(SLU_DN);
res.setScalarType<Scalar>();
res.Mtype = SLU_GE;
res.nrow = mat.rows();
res.ncol = mat.cols();
res.storage.lda = mat.outerStride();
res.storage.values = mat.data();
}
};
template<typename Derived>
struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
{
typedef Derived MatrixType;
static void run(MatrixType& mat, SluMatrix& res)
{
if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
{
res.setStorageType(SLU_NR);
res.nrow = mat.cols();
res.ncol = mat.rows();
}
else
{
res.setStorageType(SLU_NC);
res.nrow = mat.rows();
res.ncol = mat.cols();
}
res.Mtype = SLU_GE;
res.storage.nnz = mat.nonZeros();
res.storage.values = mat.valuePtr();
res.storage.innerInd = mat.innerIndexPtr();
res.storage.outerInd = mat.outerIndexPtr();
res.setScalarType<typename MatrixType::Scalar>();
// FIXME the following is not very accurate
if (MatrixType::Flags & Upper)
res.Mtype = SLU_TRU;
if (MatrixType::Flags & Lower)
res.Mtype = SLU_TRL;
eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
}
};
namespace internal {
template<typename MatrixType>
SluMatrix asSluMatrix(MatrixType& mat)
{
return SluMatrix::Map(mat);
}
/** View a SuperLU matrix as an Eigen expression */
template<typename Scalar, int Flags, typename Index>
MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
{
eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
|| (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);
Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
return MappedSparseMatrix<Scalar,Flags,Index>(
sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
}
} // end namespace internal
/** \ingroup SuperLUSupport_Module
* \class SuperLUBase
 * \brief The base class for the direct and incomplete LU factorizations of SuperLU
*/
template<typename _MatrixType, typename Derived>
class SuperLUBase : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::derived;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef Map<PermutationMatrix<Dynamic,Dynamic,int> > PermutationMap;
typedef SparseMatrix<Scalar> LUMatrixType;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
SuperLUBase() {}
~SuperLUBase()
{
clearFactors();
}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
    /** \returns a reference to the SuperLU options object to configure the SuperLU algorithms. */
inline superlu_options_t& options() { return m_sluOptions; }
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the factorization failed for numerical reasons.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
    /** Computes the sparse LU decomposition of \a matrix */
void compute(const MatrixType& matrix)
{
derived().analyzePattern(matrix);
derived().factorize(matrix);
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
      *
      * This function is particularly useful when solving several problems having the same structure.
*
* \sa factorize()
*/
void analyzePattern(const MatrixType& /*matrix*/)
{
m_isInitialized = true;
m_info = Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
}
template<typename Stream>
void dumpMemory(Stream& /*s*/)
{}
protected:
void initFactorization(const MatrixType& a)
{
set_default_options(&this->m_sluOptions);
const Index size = a.rows();
m_matrix = a;
m_sluA = internal::asSluMatrix(m_matrix);
clearFactors();
m_p.resize(size);
m_q.resize(size);
m_sluRscale.resize(size);
m_sluCscale.resize(size);
m_sluEtree.resize(size);
// set empty B and X
m_sluB.setStorageType(SLU_DN);
m_sluB.setScalarType<Scalar>();
m_sluB.Mtype = SLU_GE;
m_sluB.storage.values = 0;
m_sluB.nrow = 0;
m_sluB.ncol = 0;
m_sluB.storage.lda = internal::convert_index<int>(size);
m_sluX = m_sluB;
m_extractedDataAreDirty = true;
}
void init()
{
m_info = InvalidInput;
m_isInitialized = false;
m_sluL.Store = 0;
m_sluU.Store = 0;
}
void extractData() const;
void clearFactors()
{
if(m_sluL.Store)
Destroy_SuperNode_Matrix(&m_sluL);
if(m_sluU.Store)
Destroy_CompCol_Matrix(&m_sluU);
m_sluL.Store = 0;
m_sluU.Store = 0;
memset(&m_sluL,0,sizeof m_sluL);
memset(&m_sluU,0,sizeof m_sluU);
}
// cached data to reduce reallocation, etc.
mutable LUMatrixType m_l;
mutable LUMatrixType m_u;
mutable IntColVectorType m_p;
mutable IntRowVectorType m_q;
mutable LUMatrixType m_matrix; // copy of the factorized matrix
mutable SluMatrix m_sluA;
mutable SuperMatrix m_sluL, m_sluU;
mutable SluMatrix m_sluB, m_sluX;
mutable SuperLUStat_t m_sluStat;
mutable superlu_options_t m_sluOptions;
mutable std::vector<int> m_sluEtree;
mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
mutable char m_sluEqued;
mutable ComputationInfo m_info;
int m_factorizationIsOk;
int m_analysisIsOk;
mutable bool m_extractedDataAreDirty;
private:
SuperLUBase(SuperLUBase& ) { }
};
/** \ingroup SuperLUSupport_Module
* \class SuperLU
* \brief A sparse direct LU factorization and solver based on the SuperLU library
*
 * This class allows solving A.X = B sparse linear problems via a direct LU factorization
 * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
*
 * \warning This class is only for the 4.x and 5.x versions of SuperLU. The 3.x versions are not supported.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SparseLU
*/
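// Usage sketch, assuming a suitable SuperLU library is installed and linked:
//
//   SparseMatrix<double> A = ...;               // square and invertible
//   VectorXd b = ...;
//   SuperLU<SparseMatrix<double> > solver(A);   // analyzePattern + factorize
//   if(solver.info() == Success)
//     VectorXd x = solver.solve(b);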
template<typename _MatrixType>
class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
{
public:
typedef SuperLUBase<_MatrixType,SuperLU> Base;
typedef _MatrixType MatrixType;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::IntRowVectorType IntRowVectorType;
typedef typename Base::IntColVectorType IntColVectorType;
typedef typename Base::PermutationMap PermutationMap;
typedef typename Base::LUMatrixType LUMatrixType;
typedef TriangularView<LUMatrixType, Lower|UnitDiag> LMatrixType;
typedef TriangularView<LUMatrixType, Upper> UMatrixType;
public:
using Base::_solve_impl;
SuperLU() : Base() { init(); }
explicit SuperLU(const MatrixType& matrix) : Base()
{
init();
Base::compute(matrix);
}
~SuperLU()
{
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
      *
      * This function is particularly useful when solving several problems having the same structure.
*
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
m_info = InvalidInput;
m_isInitialized = false;
Base::analyzePattern(matrix);
}
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix);
/** \internal */
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
inline const LMatrixType& matrixL() const
{
if (m_extractedDataAreDirty) this->extractData();
return m_l;
}
inline const UMatrixType& matrixU() const
{
if (m_extractedDataAreDirty) this->extractData();
return m_u;
}
inline const IntColVectorType& permutationP() const
{
if (m_extractedDataAreDirty) this->extractData();
return m_p;
}
inline const IntRowVectorType& permutationQ() const
{
if (m_extractedDataAreDirty) this->extractData();
return m_q;
}
Scalar determinant() const;
protected:
using Base::m_matrix;
using Base::m_sluOptions;
using Base::m_sluA;
using Base::m_sluB;
using Base::m_sluX;
using Base::m_p;
using Base::m_q;
using Base::m_sluEtree;
using Base::m_sluEqued;
using Base::m_sluRscale;
using Base::m_sluCscale;
using Base::m_sluL;
using Base::m_sluU;
using Base::m_sluStat;
using Base::m_sluFerr;
using Base::m_sluBerr;
using Base::m_l;
using Base::m_u;
using Base::m_analysisIsOk;
using Base::m_factorizationIsOk;
using Base::m_extractedDataAreDirty;
using Base::m_isInitialized;
using Base::m_info;
void init()
{
Base::init();
set_default_options(&this->m_sluOptions);
m_sluOptions.PrintStat = NO;
m_sluOptions.ConditionNumber = NO;
m_sluOptions.Trans = NOTRANS;
m_sluOptions.ColPerm = COLAMD;
}
private:
SuperLU(SuperLU& ) { }
};
template<typename MatrixType>
void SuperLU<MatrixType>::factorize(const MatrixType& a)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
if(!m_analysisIsOk)
{
m_info = InvalidInput;
return;
}
this->initFactorization(a);
m_sluOptions.ColPerm = COLAMD;
int info = 0;
RealScalar recip_pivot_growth, rcond;
RealScalar ferr, berr;
StatInit(&m_sluStat);
SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
&m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
&m_sluL, &m_sluU,
NULL, 0,
&m_sluB, &m_sluX,
&recip_pivot_growth, &rcond,
&ferr, &berr,
&m_sluStat, &info, Scalar());
StatFree(&m_sluStat);
m_extractedDataAreDirty = true;
// FIXME how to better check for errors ???
m_info = info == 0 ? Success : NumericalIssue;
m_factorizationIsOk = true;
}
template<typename MatrixType>
template<typename Rhs,typename Dest>
void SuperLU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
const Index size = m_matrix.rows();
const Index rhsCols = b.cols();
eigen_assert(size==b.rows());
m_sluOptions.Trans = NOTRANS;
m_sluOptions.Fact = FACTORED;
m_sluOptions.IterRefine = NOREFINE;
m_sluFerr.resize(rhsCols);
m_sluBerr.resize(rhsCols);
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b);
Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x);
m_sluB = SluMatrix::Map(b_ref.const_cast_derived());
m_sluX = SluMatrix::Map(x_ref.const_cast_derived());
typename Rhs::PlainObject b_cpy;
if(m_sluEqued!='N')
{
b_cpy = b;
m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
}
StatInit(&m_sluStat);
int info = 0;
RealScalar recip_pivot_growth, rcond;
SuperLU_gssvx(&m_sluOptions, &m_sluA,
m_q.data(), m_p.data(),
&m_sluEtree[0], &m_sluEqued,
&m_sluRscale[0], &m_sluCscale[0],
&m_sluL, &m_sluU,
NULL, 0,
&m_sluB, &m_sluX,
&recip_pivot_growth, &rcond,
&m_sluFerr[0], &m_sluBerr[0],
&m_sluStat, &info, Scalar());
StatFree(&m_sluStat);
if(x.derived().data() != x_ref.data())
x = x_ref;
m_info = info==0 ? Success : NumericalIssue;
}
// the code of this extractData() function has been adapted from SuperLU's Matlab support code,
//
// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
//
// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
//
template<typename MatrixType, typename Derived>
void SuperLUBase<MatrixType,Derived>::extractData() const
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
if (m_extractedDataAreDirty)
{
int upper;
int fsupc, istart, nsupr;
int lastl = 0, lastu = 0;
SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
Scalar *SNptr;
const Index size = m_matrix.rows();
m_l.resize(size,size);
m_l.resizeNonZeros(Lstore->nnz);
m_u.resize(size,size);
m_u.resizeNonZeros(Ustore->nnz);
int* Lcol = m_l.outerIndexPtr();
int* Lrow = m_l.innerIndexPtr();
Scalar* Lval = m_l.valuePtr();
int* Ucol = m_u.outerIndexPtr();
int* Urow = m_u.innerIndexPtr();
Scalar* Uval = m_u.valuePtr();
      Lcol[0] = 0;
      Ucol[0] = 0;
/* for each supernode */
for (int k = 0; k <= Lstore->nsuper; ++k)
{
fsupc = L_FST_SUPC(k);
istart = L_SUB_START(fsupc);
nsupr = L_SUB_START(fsupc+1) - istart;
upper = 1;
/* for each column in the supernode */
for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
{
SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
/* Extract U */
for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
{
Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
/* Matlab doesn't like explicit zero. */
if (Uval[lastu] != 0.0)
Urow[lastu++] = U_SUB(i);
}
for (int i = 0; i < upper; ++i)
{
/* upper triangle in the supernode */
Uval[lastu] = SNptr[i];
/* Matlab doesn't like explicit zero. */
if (Uval[lastu] != 0.0)
Urow[lastu++] = L_SUB(istart+i);
}
Ucol[j+1] = lastu;
/* Extract L */
Lval[lastl] = 1.0; /* unit diagonal */
Lrow[lastl++] = L_SUB(istart + upper - 1);
for (int i = upper; i < nsupr; ++i)
{
Lval[lastl] = SNptr[i];
/* Matlab doesn't like explicit zero. */
if (Lval[lastl] != 0.0)
Lrow[lastl++] = L_SUB(istart+i);
}
Lcol[j+1] = lastl;
++upper;
} /* for j ... */
} /* for k ... */
// squeeze the matrices :
m_l.resizeNonZeros(lastl);
m_u.resizeNonZeros(lastu);
m_extractedDataAreDirty = false;
}
}
template<typename MatrixType>
typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
if (m_extractedDataAreDirty)
this->extractData();
Scalar det = Scalar(1);
for (int j=0; j<m_u.cols(); ++j)
{
if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)
{
int lastId = m_u.outerIndexPtr()[j+1]-1;
eigen_assert(m_u.innerIndexPtr()[lastId]<=j);
if (m_u.innerIndexPtr()[lastId]==j)
det *= m_u.valuePtr()[lastId];
}
}
if(PermutationMap(m_p.data(),m_p.size()).determinant()*PermutationMap(m_q.data(),m_q.size()).determinant()<0)
det = -det;
if(m_sluEqued!='N')
return det/m_sluRscale.prod()/m_sluCscale.prod();
else
return det;
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_SUPERLU_HAS_ILU
#endif
#ifdef EIGEN_SUPERLU_HAS_ILU
/** \ingroup SuperLUSupport_Module
* \class SuperILU
* \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library
*
 * This class allows computing an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization
 * using the SuperLU library. It is mainly intended to be used as a preconditioner for iterative linear solvers.
*
* \warning This class is only for the 4.x versions of SuperLU. The 3.x and 5.x versions are not supported.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class IncompleteLUT, class ConjugateGradient, class BiCGSTAB
*/
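// Usage sketch: SuperILU exposes the same interface as SuperLU, but solve()
// only returns an approximate solution, which is typically used as a
// preconditioning step (assuming the ILU routines are available):
//
//   SuperILU<SparseMatrix<double> > ilu(A);
//   if(ilu.info() == Success)
//     VectorXd z = ilu.solve(r);  // approximate solve with the ILU factors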
template<typename _MatrixType>
class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
{
public:
typedef SuperLUBase<_MatrixType,SuperILU> Base;
typedef _MatrixType MatrixType;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
public:
using Base::_solve_impl;
SuperILU() : Base() { init(); }
SuperILU(const MatrixType& matrix) : Base()
{
init();
Base::compute(matrix);
}
~SuperILU()
{
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
      *
      * This function is particularly useful when solving several problems having the same structure.
*
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
Base::analyzePattern(matrix);
}
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix);
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal */
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
using Base::m_matrix;
using Base::m_sluOptions;
using Base::m_sluA;
using Base::m_sluB;
using Base::m_sluX;
using Base::m_p;
using Base::m_q;
using Base::m_sluEtree;
using Base::m_sluEqued;
using Base::m_sluRscale;
using Base::m_sluCscale;
using Base::m_sluL;
using Base::m_sluU;
using Base::m_sluStat;
using Base::m_sluFerr;
using Base::m_sluBerr;
using Base::m_l;
using Base::m_u;
using Base::m_analysisIsOk;
using Base::m_factorizationIsOk;
using Base::m_extractedDataAreDirty;
using Base::m_isInitialized;
using Base::m_info;
void init()
{
Base::init();
ilu_set_default_options(&m_sluOptions);
m_sluOptions.PrintStat = NO;
m_sluOptions.ConditionNumber = NO;
m_sluOptions.Trans = NOTRANS;
m_sluOptions.ColPerm = MMD_AT_PLUS_A;
// no attempt to preserve column sum
m_sluOptions.ILU_MILU = SILU;
// only basic ILU(k) support -- no direct control over memory consumption
// better to use ILU_DropRule = DROP_BASIC | DROP_AREA
// and set ILU_FillFactor to max memory growth
m_sluOptions.ILU_DropRule = DROP_BASIC;
m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
}
private:
SuperILU(SuperILU& ) { }
};
template<typename MatrixType>
void SuperILU<MatrixType>::factorize(const MatrixType& a)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
if(!m_analysisIsOk)
{
m_info = InvalidInput;
return;
}
this->initFactorization(a);
int info = 0;
RealScalar recip_pivot_growth, rcond;
StatInit(&m_sluStat);
SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
&m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
&m_sluL, &m_sluU,
NULL, 0,
&m_sluB, &m_sluX,
&recip_pivot_growth, &rcond,
&m_sluStat, &info, Scalar());
StatFree(&m_sluStat);
// FIXME how to better check for errors ???
m_info = info == 0 ? Success : NumericalIssue;
m_factorizationIsOk = true;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatrixType>
template<typename Rhs,typename Dest>
void SuperILU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
const int size = m_matrix.rows();
const int rhsCols = b.cols();
eigen_assert(size==b.rows());
m_sluOptions.Trans = NOTRANS;
m_sluOptions.Fact = FACTORED;
m_sluOptions.IterRefine = NOREFINE;
m_sluFerr.resize(rhsCols);
m_sluBerr.resize(rhsCols);
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b);
Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x);
m_sluB = SluMatrix::Map(b_ref.const_cast_derived());
m_sluX = SluMatrix::Map(x_ref.const_cast_derived());
typename Rhs::PlainObject b_cpy;
if(m_sluEqued!='N')
{
b_cpy = b;
m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
}
int info = 0;
RealScalar recip_pivot_growth, rcond;
StatInit(&m_sluStat);
SuperLU_gsisx(&m_sluOptions, &m_sluA,
m_q.data(), m_p.data(),
&m_sluEtree[0], &m_sluEqued,
&m_sluRscale[0], &m_sluCscale[0],
&m_sluL, &m_sluU,
NULL, 0,
&m_sluB, &m_sluX,
&recip_pivot_growth, &rcond,
&m_sluStat, &info, Scalar());
StatFree(&m_sluStat);
if(x.derived().data() != x_ref.data())
x = x_ref;
m_info = info==0 ? Success : NumericalIssue;
}
#endif
#endif
} // end namespace Eigen
#endif // EIGEN_SUPERLUSUPPORT_H
| 34,341 | 32.406615 | 183 |
h
|
abess
|
abess-master/python/include/Eigen/src/UmfPackSupport/UmfPackSupport.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2011 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_UMFPACKSUPPORT_H
#define EIGEN_UMFPACKSUPPORT_H
namespace Eigen {
/* TODO extract L, extract U, compute det, etc... */
// generic double/complex<double> wrapper functions:
inline void umfpack_defaults(double control[UMFPACK_CONTROL], double)
{ umfpack_di_defaults(control); }
inline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>)
{ umfpack_zi_defaults(control); }
inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double)
{ umfpack_di_report_info(control, info);}
inline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>)
{ umfpack_zi_report_info(control, info);}
inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double)
{ umfpack_di_report_status(control, status);}
inline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>)
{ umfpack_zi_report_status(control, status);}
inline void umfpack_report_control(double control[UMFPACK_CONTROL], double)
{ umfpack_di_report_control(control);}
inline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>)
{ umfpack_zi_report_control(control);}
inline void umfpack_free_numeric(void **Numeric, double)
{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
inline void umfpack_free_symbolic(void **Symbolic, double)
{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
inline int umfpack_symbolic(int n_row,int n_col,
const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
{
return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
}
inline int umfpack_symbolic(int n_row,int n_col,
const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
{
return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
}
inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
void *Symbolic, void **Numeric,
const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
{
return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
}
inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
void *Symbolic, void **Numeric,
const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
{
return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
}
inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
double X[], const double B[], void *Numeric,
const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
{
return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
}
inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
std::complex<double> X[], const std::complex<double> B[], void *Numeric,
const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
{
return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
}
inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
{
return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
}
inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
{
return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
}
inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
{
return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
}
inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
{
double& lx0_real = numext::real_ref(Lx[0]);
double& ux0_real = numext::real_ref(Ux[0]);
double& dx0_real = numext::real_ref(Dx[0]);
return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
}
inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
{
return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
}
inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
{
double& mx_real = numext::real_ref(*Mx);
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}
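// The trailing dummy Scalar argument in the wrappers above selects the
// UMFPACK "di" (double) or "zi" (std::complex<double>) entry point purely
// through C++ overload resolution. A minimal sketch (control array as used
// throughout this file):
//
//   double control[UMFPACK_CONTROL];
//   umfpack_defaults(control, double());               // calls umfpack_di_defaults
//   umfpack_defaults(control, std::complex<double>()); // calls umfpack_zi_defaults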
/** \ingroup UmfPackSupport_Module
* \brief A sparse LU factorization and solver based on UmfPack
*
* This class allows solving A.X = B sparse linear problems via an LU factorization
* using the UmfPack library. The sparse matrix A must be square and full rank.
* The vectors or matrices X and B can be either dense or sparse.
*
* \warning The input matrix A should be in a \b compressed and \b column-major form.
* Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
* \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SparseLU
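*
* A minimal usage sketch (A and b are hypothetical; UMFPACK must be linked):
* \code
* SparseMatrix<double> A;                 // filled elsewhere; compressed, column-major
* VectorXd b, x;
* UmfPackLU<SparseMatrix<double> > lu(A);
* if(lu.info() == Success)
*   x = lu.solve(b);
* \endcode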
*/
template<typename _MatrixType>
class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
{
protected:
typedef SparseSolverBase<UmfPackLU<_MatrixType> > Base;
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef SparseMatrix<Scalar> LUMatrixType;
typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;
typedef Ref<const UmfpackMatrixType, StandardCompressedFormat> UmfpackMatrixRef;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
typedef Array<double, UMFPACK_CONTROL, 1> UmfpackControl;
typedef Array<double, UMFPACK_INFO, 1> UmfpackInfo;
UmfPackLU()
: m_dummy(0,0), mp_matrix(m_dummy)
{
init();
}
template<typename InputMatrixType>
explicit UmfPackLU(const InputMatrixType& matrix)
: mp_matrix(matrix)
{
init();
compute(matrix);
}
~UmfPackLU()
{
if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
}
inline Index rows() const { return mp_matrix.rows(); }
inline Index cols() const { return mp_matrix.cols(); }
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be singular.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
inline const LUMatrixType& matrixL() const
{
if (m_extractedDataAreDirty) extractData();
return m_l;
}
inline const LUMatrixType& matrixU() const
{
if (m_extractedDataAreDirty) extractData();
return m_u;
}
inline const IntColVectorType& permutationP() const
{
if (m_extractedDataAreDirty) extractData();
return m_p;
}
inline const IntRowVectorType& permutationQ() const
{
if (m_extractedDataAreDirty) extractData();
return m_q;
}
/** Computes the sparse LU decomposition of \a matrix
* Note that the matrix should be column-major, and in compressed format for best performance.
* \sa SparseMatrix::makeCompressed().
*/
template<typename InputMatrixType>
void compute(const InputMatrixType& matrix)
{
if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
grab(matrix.derived());
analyzePattern_impl();
factorize_impl();
}
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
* \sa factorize(), compute()
*/
template<typename InputMatrixType>
void analyzePattern(const InputMatrixType& matrix)
{
if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
grab(matrix.derived());
analyzePattern_impl();
}
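// A hedged sketch of the analyzePattern()/factorize() split for a sequence
// of matrices sharing one sparsity pattern (A1, A2 and b are hypothetical):
//
//   UmfPackLU<SparseMatrix<double> > lu;
//   lu.analyzePattern(A1);      // symbolic phase, done once
//   lu.factorize(A1);           // numeric phase
//   VectorXd x1 = lu.solve(b);
//   lu.factorize(A2);           // same pattern: the symbolic analysis is reused
//   VectorXd x2 = lu.solve(b);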
/** Provides the return status code returned by UmfPack during the numeric
* factorization.
*
* \sa factorize(), compute()
*/
inline int umfpackFactorizeReturncode() const
{
eigen_assert(m_numeric && "UmfPackLU: you must first call factorize()");
return m_fact_errorCode;
}
/** Provides access to the control settings array used by UmfPack.
*
* If this array contains NaNs, the default values are used.
*
* See UMFPACK documentation for details.
*/
inline const UmfpackControl& umfpackControl() const
{
return m_control;
}
/** Provides access to the control settings array used by UmfPack.
*
* If this array contains NaNs, the default values are used.
*
* See UMFPACK documentation for details.
*/
inline UmfpackControl& umfpackControl()
{
return m_control;
}
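// For instance, one might request UMFPACK's symmetric pivoting strategy
// before factorizing (UMFPACK_STRATEGY and UMFPACK_STRATEGY_SYMMETRIC are
// standard UMFPACK constants, assumed available here; A is hypothetical):
//
//   UmfPackLU<SparseMatrix<double> > lu;
//   lu.umfpackControl()[UMFPACK_STRATEGY] = UMFPACK_STRATEGY_SYMMETRIC;
//   lu.compute(A);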
/** Performs a numeric decomposition of \a matrix
*
* The given matrix must have the same sparsity pattern as the matrix on which the pattern analysis has been performed.
*
* \sa analyzePattern(), compute()
*/
template<typename InputMatrixType>
void factorize(const InputMatrixType& matrix)
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
if(m_numeric)
umfpack_free_numeric(&m_numeric,Scalar());
grab(matrix.derived());
factorize_impl();
}
/** Prints the current UmfPack control settings.
*
* \sa umfpackControl()
*/
void umfpackReportControl()
{
umfpack_report_control(m_control.data(), Scalar());
}
/** Prints statistics collected by UmfPack.
*
* \sa analyzePattern(), compute()
*/
void umfpackReportInfo()
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar());
}
/** Prints the status of the previous factorization operation performed by UmfPack (symbolic or numerical factorization).
*
* \sa analyzePattern(), compute()
*/
void umfpackReportStatus() {
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar());
}
/** \internal */
template<typename BDerived,typename XDerived>
bool _solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
Scalar determinant() const;
void extractData() const;
protected:
void init()
{
m_info = InvalidInput;
m_isInitialized = false;
m_numeric = 0;
m_symbolic = 0;
m_extractedDataAreDirty = true;
umfpack_defaults(m_control.data(), Scalar());
}
void analyzePattern_impl()
{
m_fact_errorCode = umfpack_symbolic(internal::convert_index<int>(mp_matrix.rows()),
internal::convert_index<int>(mp_matrix.cols()),
mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
&m_symbolic, m_control.data(), m_umfpackInfo.data());
m_isInitialized = true;
m_info = m_fact_errorCode ? InvalidInput : Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_extractedDataAreDirty = true;
}
void factorize_impl()
{
m_fact_errorCode = umfpack_numeric(mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
m_symbolic, &m_numeric, m_control.data(), m_umfpackInfo.data());
m_info = m_fact_errorCode == UMFPACK_OK ? Success : NumericalIssue;
m_factorizationIsOk = true;
m_extractedDataAreDirty = true;
}
template<typename MatrixDerived>
void grab(const EigenBase<MatrixDerived> &A)
{
mp_matrix.~UmfpackMatrixRef();
::new (&mp_matrix) UmfpackMatrixRef(A.derived());
}
void grab(const UmfpackMatrixRef &A)
{
if(&(A.derived()) != &mp_matrix)
{
mp_matrix.~UmfpackMatrixRef();
::new (&mp_matrix) UmfpackMatrixRef(A);
}
}
// cached data to reduce reallocation, etc.
mutable LUMatrixType m_l;
int m_fact_errorCode;
UmfpackControl m_control;
mutable UmfpackInfo m_umfpackInfo;
mutable LUMatrixType m_u;
mutable IntColVectorType m_p;
mutable IntRowVectorType m_q;
UmfpackMatrixType m_dummy;
UmfpackMatrixRef mp_matrix;
void* m_numeric;
void* m_symbolic;
mutable ComputationInfo m_info;
int m_factorizationIsOk;
int m_analysisIsOk;
mutable bool m_extractedDataAreDirty;
private:
UmfPackLU(const UmfPackLU& ) { }
};
template<typename MatrixType>
void UmfPackLU<MatrixType>::extractData() const
{
if (m_extractedDataAreDirty)
{
// get size of the data
int lnz, unz, rows, cols, nz_udiag;
umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
// allocate data
m_l.resize(rows,(std::min)(rows,cols));
m_l.resizeNonZeros(lnz);
m_u.resize((std::min)(rows,cols),cols);
m_u.resizeNonZeros(unz);
m_p.resize(rows);
m_q.resize(cols);
// extract
umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
m_extractedDataAreDirty = false;
}
}
template<typename MatrixType>
typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
{
Scalar det;
umfpack_get_determinant(&det, 0, m_numeric, 0);
return det;
}
template<typename MatrixType>
template<typename BDerived,typename XDerived>
bool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
{
Index rhsCols = b.cols();
eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve");
int errorCode;
Scalar* x_ptr = 0;
Matrix<Scalar,Dynamic,1> x_tmp;
if(x.innerStride()!=1)
{
x_tmp.resize(x.rows());
x_ptr = x_tmp.data();
}
for (int j=0; j<rhsCols; ++j)
{
if(x.innerStride()==1)
x_ptr = &x.col(j).coeffRef(0);
errorCode = umfpack_solve(UMFPACK_A,
mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),
x_ptr, &b.const_cast_derived().col(j).coeffRef(0), m_numeric, m_control.data(), m_umfpackInfo.data());
if(x.innerStride()!=1)
x.col(j) = x_tmp;
if (errorCode!=0)
return false;
}
return true;
}
} // end namespace Eigen
#endif // EIGEN_UMFPACKSUPPORT_H
| 17,202 | 32.930966 | 137 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/Image.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MISC_IMAGE_H
#define EIGEN_MISC_IMAGE_H
namespace Eigen {
namespace internal {
/** \class image_retval_base
*
*/
template<typename DecompositionType>
struct traits<image_retval_base<DecompositionType> >
{
typedef typename DecompositionType::MatrixType MatrixType;
typedef Matrix<
typename MatrixType::Scalar,
MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose
// dimension is the number of rows of the original matrix
Dynamic, // we don't know at compile time the dimension of the image (the rank)
MatrixType::Options,
MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
> ReturnType;
};
template<typename _DecompositionType> struct image_retval_base
: public ReturnByValue<image_retval_base<_DecompositionType> >
{
typedef _DecompositionType DecompositionType;
typedef typename DecompositionType::MatrixType MatrixType;
typedef ReturnByValue<image_retval_base> Base;
image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
: m_dec(dec), m_rank(dec.rank()),
m_cols(m_rank == 0 ? 1 : m_rank),
m_originalMatrix(originalMatrix)
{}
inline Index rows() const { return m_dec.rows(); }
inline Index cols() const { return m_cols; }
inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
template<typename Dest> inline void evalTo(Dest& dst) const
{
static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);
}
protected:
const DecompositionType& m_dec;
Index m_rank, m_cols;
const MatrixType& m_originalMatrix;
};
} // end namespace internal
#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
typedef Eigen::internal::image_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::originalMatrix; \
using Base::rank; \
using Base::rows; \
using Base::cols; \
image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \
: Base(dec, originalMatrix) {}
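// A hedged usage sketch: decompositions such as FullPivLU expose image()
// on top of these helpers (A is a hypothetical MatrixXd):
//
//   FullPivLU<MatrixXd> lu(A);
//   MatrixXd im = lu.image(A);  // columns form a basis of the column space of A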
} // end namespace Eigen
#endif // EIGEN_MISC_IMAGE_H
| 2,913 | 34.108434 | 107 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/Kernel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MISC_KERNEL_H
#define EIGEN_MISC_KERNEL_H
namespace Eigen {
namespace internal {
/** \class kernel_retval_base
*
*/
template<typename DecompositionType>
struct traits<kernel_retval_base<DecompositionType> >
{
typedef typename DecompositionType::MatrixType MatrixType;
typedef Matrix<
typename MatrixType::Scalar,
MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix"
// is the number of cols of the original matrix
// so that the product "matrix * kernel = zero" makes sense
Dynamic, // we don't know at compile-time the dimension of the kernel
MatrixType::Options,
MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,
// whose dimension is the number of columns of the original matrix
> ReturnType;
};
template<typename _DecompositionType> struct kernel_retval_base
: public ReturnByValue<kernel_retval_base<_DecompositionType> >
{
typedef _DecompositionType DecompositionType;
typedef ReturnByValue<kernel_retval_base> Base;
explicit kernel_retval_base(const DecompositionType& dec)
: m_dec(dec),
m_rank(dec.rank()),
m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
{}
inline Index rows() const { return m_dec.cols(); }
inline Index cols() const { return m_cols; }
inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
template<typename Dest> inline void evalTo(Dest& dst) const
{
static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);
}
protected:
const DecompositionType& m_dec;
Index m_rank, m_cols;
};
} // end namespace internal
#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::rank; \
using Base::rows; \
using Base::cols; \
kernel_retval(const DecompositionType& dec) : Base(dec) {}
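// A hedged usage sketch: decompositions such as FullPivLU expose kernel()
// on top of these helpers (A is a hypothetical MatrixXd):
//
//   FullPivLU<MatrixXd> lu(A);
//   MatrixXd ker = lu.kernel(); // A * ker is numerically zero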
} // end namespace Eigen
#endif // EIGEN_MISC_KERNEL_H
| 2,742 | 33.2875 | 103 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/RealSvd2x2.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2016 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REALSVD2X2_H
#define EIGEN_REALSVD2X2_H
namespace Eigen {
namespace internal {
template<typename MatrixType, typename RealScalar, typename Index>
void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_left,
JacobiRotation<RealScalar> *j_right)
{
using std::sqrt;
using std::abs;
Matrix<RealScalar,2,2> m;
m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
if(abs(d) < (std::numeric_limits<RealScalar>::min)())
{
rot1.s() = RealScalar(0);
rot1.c() = RealScalar(1);
}
else
{
// If d!=0, then t/d cannot overflow because the entries forming d are
// not too small in magnitude compared to the ones forming t.
RealScalar u = t / d;
RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
rot1.s() = RealScalar(1) / tmp;
rot1.c() = u / tmp;
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
*j_left = rot1 * j_right->transpose();
}
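// In words: rot1 is a rotation making the 2x2 block symmetric, makeJacobi()
// then diagonalizes that symmetric block with j_right, and the matching left
// rotation is recovered as j_left = rot1 * j_right^T; applying j_left and
// j_right on the two sides of m yields a diagonal (real 2x2 SVD) result.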
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_REALSVD2X2_H
| 1,748 | 30.232143 | 74 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/blas.h
|
#ifndef BLAS_H
#define BLAS_H
#ifdef __cplusplus
extern "C"
{
#endif
#define BLASFUNC(FUNC) FUNC##_
#ifdef __WIN64__
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
#else
typedef long BLASLONG;
typedef unsigned long BLASULONG;
#endif
int BLASFUNC(xerbla)(const char *, int *info, int);
float BLASFUNC(sdot) (int *, float *, int *, float *, int *);
float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *);
double BLASFUNC(dsdot) (int *, float *, int *, float *, int *);
double BLASFUNC(ddot) (int *, double *, int *, double *, int *);
double BLASFUNC(qdot) (int *, double *, int *, double *, int *);
int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*);
int BLASFUNC(cdotcw) (int *, float *, int *, float *, int *, float*);
int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*);
int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*);
int BLASFUNC(saxpy) (const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(daxpy) (const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(qaxpy) (const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(caxpy) (const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(zaxpy) (const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(xaxpy) (const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(caxpyc)(const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(zaxpyc)(const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(xaxpyc)(const int *, const double *, const double *, const int *, double *, const int *);
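/* A hedged sketch of a level-1 call through these prototypes: Fortran BLAS
   passes every scalar by pointer. Computes y <- alpha*x + y (x and y are
   hypothetical):
     int n = 3, inc = 1;
     double alpha = 2.0, x[3] = {1, 2, 3}, y[3] = {0, 0, 0};
     BLASFUNC(daxpy)(&n, &alpha, x, &inc, y, &inc);
*/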
int BLASFUNC(scopy) (int *, float *, int *, float *, int *);
int BLASFUNC(dcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(qcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(ccopy) (int *, float *, int *, float *, int *);
int BLASFUNC(zcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(xcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(sswap) (int *, float *, int *, float *, int *);
int BLASFUNC(dswap) (int *, double *, int *, double *, int *);
int BLASFUNC(qswap) (int *, double *, int *, double *, int *);
int BLASFUNC(cswap) (int *, float *, int *, float *, int *);
int BLASFUNC(zswap) (int *, double *, int *, double *, int *);
int BLASFUNC(xswap) (int *, double *, int *, double *, int *);
float BLASFUNC(sasum) (int *, float *, int *);
float BLASFUNC(scasum)(int *, float *, int *);
double BLASFUNC(dasum) (int *, double *, int *);
double BLASFUNC(qasum) (int *, double *, int *);
double BLASFUNC(dzasum)(int *, double *, int *);
double BLASFUNC(qxasum)(int *, double *, int *);
int BLASFUNC(isamax)(int *, float *, int *);
int BLASFUNC(idamax)(int *, double *, int *);
int BLASFUNC(iqamax)(int *, double *, int *);
int BLASFUNC(icamax)(int *, float *, int *);
int BLASFUNC(izamax)(int *, double *, int *);
int BLASFUNC(ixamax)(int *, double *, int *);
int BLASFUNC(ismax) (int *, float *, int *);
int BLASFUNC(idmax) (int *, double *, int *);
int BLASFUNC(iqmax) (int *, double *, int *);
int BLASFUNC(icmax) (int *, float *, int *);
int BLASFUNC(izmax) (int *, double *, int *);
int BLASFUNC(ixmax) (int *, double *, int *);
int BLASFUNC(isamin)(int *, float *, int *);
int BLASFUNC(idamin)(int *, double *, int *);
int BLASFUNC(iqamin)(int *, double *, int *);
int BLASFUNC(icamin)(int *, float *, int *);
int BLASFUNC(izamin)(int *, double *, int *);
int BLASFUNC(ixamin)(int *, double *, int *);
int BLASFUNC(ismin)(int *, float *, int *);
int BLASFUNC(idmin)(int *, double *, int *);
int BLASFUNC(iqmin)(int *, double *, int *);
int BLASFUNC(icmin)(int *, float *, int *);
int BLASFUNC(izmin)(int *, double *, int *);
int BLASFUNC(ixmin)(int *, double *, int *);
float BLASFUNC(samax) (int *, float *, int *);
double BLASFUNC(damax) (int *, double *, int *);
double BLASFUNC(qamax) (int *, double *, int *);
float BLASFUNC(scamax)(int *, float *, int *);
double BLASFUNC(dzamax)(int *, double *, int *);
double BLASFUNC(qxamax)(int *, double *, int *);
float BLASFUNC(samin) (int *, float *, int *);
double BLASFUNC(damin) (int *, double *, int *);
double BLASFUNC(qamin) (int *, double *, int *);
float BLASFUNC(scamin)(int *, float *, int *);
double BLASFUNC(dzamin)(int *, double *, int *);
double BLASFUNC(qxamin)(int *, double *, int *);
float BLASFUNC(smax) (int *, float *, int *);
double BLASFUNC(dmax) (int *, double *, int *);
double BLASFUNC(qmax) (int *, double *, int *);
float BLASFUNC(scmax) (int *, float *, int *);
double BLASFUNC(dzmax) (int *, double *, int *);
double BLASFUNC(qxmax) (int *, double *, int *);
float BLASFUNC(smin) (int *, float *, int *);
double BLASFUNC(dmin) (int *, double *, int *);
double BLASFUNC(qmin) (int *, double *, int *);
float BLASFUNC(scmin) (int *, float *, int *);
double BLASFUNC(dzmin) (int *, double *, int *);
double BLASFUNC(qxmin) (int *, double *, int *);
int BLASFUNC(sscal) (int *, float *, float *, int *);
int BLASFUNC(dscal) (int *, double *, double *, int *);
int BLASFUNC(qscal) (int *, double *, double *, int *);
int BLASFUNC(cscal) (int *, float *, float *, int *);
int BLASFUNC(zscal) (int *, double *, double *, int *);
int BLASFUNC(xscal) (int *, double *, double *, int *);
int BLASFUNC(csscal)(int *, float *, float *, int *);
int BLASFUNC(zdscal)(int *, double *, double *, int *);
int BLASFUNC(xqscal)(int *, double *, double *, int *);
float BLASFUNC(snrm2) (int *, float *, int *);
float BLASFUNC(scnrm2)(int *, float *, int *);
double BLASFUNC(dnrm2) (int *, double *, int *);
double BLASFUNC(qnrm2) (int *, double *, int *);
double BLASFUNC(dznrm2)(int *, double *, int *);
double BLASFUNC(qxnrm2)(int *, double *, int *);
int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *);
int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *);
int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *);
int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *);
int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *);
int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *);
int BLASFUNC(srotg) (float *, float *, float *, float *);
int BLASFUNC(drotg) (double *, double *, double *, double *);
int BLASFUNC(qrotg) (double *, double *, double *, double *);
int BLASFUNC(crotg) (float *, float *, float *, float *);
int BLASFUNC(zrotg) (double *, double *, double *, double *);
int BLASFUNC(xrotg) (double *, double *, double *, double *);
int BLASFUNC(srotmg)(float *, float *, float *, float *, float *);
int BLASFUNC(drotmg)(double *, double *, double *, double *, double *);
int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *);
int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *);
int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *);
/* Level 2 routines */
int BLASFUNC(sger)(int *, int *, float *, float *, int *,
float *, int *, float *, int *);
int BLASFUNC(dger)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(qger)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(cgeru)(int *, int *, float *, float *, int *,
float *, int *, float *, int *);
int BLASFUNC(cgerc)(int *, int *, float *, float *, int *,
float *, int *, float *, int *);
int BLASFUNC(zgeru)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(zgerc)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(xgeru)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(xgerc)(int *, int *, double *, double *, int *,
double *, int *, double *, int *);
int BLASFUNC(sgemv)(const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(qgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(cgemv)(const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(strsv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(dtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(qtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(ctrsv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(ztrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(xtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *);
int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *);
int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(strmv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(dtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(qtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(ctrmv) (const char *, const char *, const char *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(ztrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(xtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *);
int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *);
int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *);
int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
int BLASFUNC(ssymv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(qsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(sspmv) (char *, int *, float *, float *,
float *, int *, float *, float *, int *);
int BLASFUNC(dspmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(qspmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(ssyr) (const char *, const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(dsyr) (const char *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(qsyr) (const char *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(ssyr2) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(dsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(qsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(csyr2) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, float *, const int *);
int BLASFUNC(zsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(xsyr2) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, double *, const int *);
int BLASFUNC(sspr) (char *, int *, float *, float *, int *,
float *);
int BLASFUNC(dspr) (char *, int *, double *, double *, int *,
double *);
int BLASFUNC(qspr) (char *, int *, double *, double *, int *,
double *);
int BLASFUNC(sspr2) (char *, int *, float *,
float *, int *, float *, int *, float *);
int BLASFUNC(dspr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(qspr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(cspr2) (char *, int *, float *,
float *, int *, float *, int *, float *);
int BLASFUNC(zspr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(xspr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(cher) (char *, int *, float *, float *, int *,
float *, int *);
int BLASFUNC(zher) (char *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(xher) (char *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *);
int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *);
int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *);
int BLASFUNC(cher2) (char *, int *, float *,
float *, int *, float *, int *, float *, int *);
int BLASFUNC(zher2) (char *, int *, double *,
double *, int *, double *, int *, double *, int *);
int BLASFUNC(xher2) (char *, int *, double *,
double *, int *, double *, int *, double *, int *);
int BLASFUNC(chpr2) (char *, int *, float *,
float *, int *, float *, int *, float *);
int BLASFUNC(zhpr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(xhpr2) (char *, int *, double *,
double *, int *, double *, int *, double *);
int BLASFUNC(chemv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zhemv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xhemv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(chpmv) (char *, int *, float *, float *,
float *, int *, float *, float *, int *);
int BLASFUNC(zhpmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(xhpmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(snorm)(char *, int *, int *, float *, int *);
int BLASFUNC(dnorm)(char *, int *, int *, double *, int *);
int BLASFUNC(cnorm)(char *, int *, int *, float *, int *);
int BLASFUNC(znorm)(char *, int *, int *, double *, int *);
int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
/* Level 3 routines */
int BLASFUNC(sgemm)(const char *, const char *, const int *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(qgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(cgemm)(const char *, const char *, const int *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
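/* A hedged sketch of a level-3 call through these prototypes: column-major
   storage, scalars by pointer. Computes C = alpha*A*B + beta*C for 2x2
   matrices (a, b, c are hypothetical):
     int n = 2;
     double alpha = 1.0, beta = 0.0;
     double a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, c[4];
     BLASFUNC(dgemm)("N", "N", &n, &n, &n, &alpha, a, &n, b, &n, &beta, c, &n);
*/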
int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *,
float *, int *, float *, int *, float *, float *, int *);
int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *,
double *, int *, double *, int *, double *, double *, int *);
int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *,
double *, int *, double *, int *, double *, double *, int *);
int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *,
float *, float *, int *, float *, int *,
float *, float *, int *);
int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *,
double *, double *, int *, double *, int *,
double *, double *, int *);
int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *,
float *, float *, int *, float *, int *,
float *, float *, int *);
int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *,
double *, double *, int *, double *, int *,
double *, double *, int *);
int BLASFUNC(strsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(dtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(qtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(ctrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(ztrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(xtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(strmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(dtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(qtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(ctrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, float *, const int *);
int BLASFUNC(ztrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(xtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);
int BLASFUNC(ssymm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(qsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(csymm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *);
int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *);
int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *);
int BLASFUNC(ssyrk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(qsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(csyrk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(ssyr2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(dsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
int BLASFUNC(qsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
int BLASFUNC(csyr2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
int BLASFUNC(xsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
int BLASFUNC(chemm)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zhemm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xhemm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *,
float *, int *, float *, float *, int *);
int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *,
double *, int *, double *, double *, int *);
int BLASFUNC(cherk)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(cher2k)(const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(cher2m)(const char *, const char *, const char *, const int *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
int BLASFUNC(xher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);
#ifdef __cplusplus
}
#endif
#endif
| 30,560 | 68.29932 | 201 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/lapack.h
|
#ifndef LAPACK_H
#define LAPACK_H
#include "blas.h"
#ifdef __cplusplus
extern "C"
{
#endif
int BLASFUNC(csymv) (const char *, const int *, const float *, const float *, const int *, const float *, const int *, const float *, float *, const int *);
int BLASFUNC(zsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(xsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);
int BLASFUNC(cspmv) (char *, int *, float *, float *,
float *, int *, float *, float *, int *);
int BLASFUNC(zspmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(xspmv) (char *, int *, double *, double *,
double *, int *, double *, double *, int *);
int BLASFUNC(csyr) (char *, int *, float *, float *, int *,
float *, int *);
int BLASFUNC(zsyr) (char *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(xsyr) (char *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(cspr) (char *, int *, float *, float *, int *,
float *);
int BLASFUNC(zspr) (char *, int *, double *, double *, int *,
double *);
int BLASFUNC(xspr) (char *, int *, double *, double *, int *,
double *);
int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *,
float *, int *);
int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *,
float *, int *);
int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *,
double *, int *);
int BLASFUNC(sgema)(char *, char *, int *, int *, float *,
float *, int *, float *, float *, int *, float *, int *);
int BLASFUNC(dgema)(char *, char *, int *, int *, double *,
double *, int *, double*, double *, int *, double*, int *);
int BLASFUNC(cgema)(char *, char *, int *, int *, float *,
float *, int *, float *, float *, int *, float *, int *);
int BLASFUNC(zgema)(char *, char *, int *, int *, double *,
double *, int *, double*, double *, int *, double*, int *);
int BLASFUNC(sgems)(char *, char *, int *, int *, float *,
float *, int *, float *, float *, int *, float *, int *);
int BLASFUNC(dgems)(char *, char *, int *, int *, double *,
double *, int *, double*, double *, int *, double*, int *);
int BLASFUNC(cgems)(char *, char *, int *, int *, float *,
float *, int *, float *, float *, int *, float *, int *);
int BLASFUNC(zgems)(char *, char *, int *, int *, double *,
double *, int *, double*, double *, int *, double*, int *);
int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *);
int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *);
int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *);
int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *);
int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *);
int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *);
int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *);
int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *);
int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *);
int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *);
int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *);
int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *);
int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *);
int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *);
int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *);
int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
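/* A hedged sketch of a dense LU solve through these prototypes: dgesv
   factorizes a (column-major) in place and overwrites b with the solution
   (a and b are hypothetical):
     int n = 2, nrhs = 1, ipiv[2], info;
     double a[4] = {4, 1, 1, 3}, b[2] = {1, 2};
     BLASFUNC(dgesv)(&n, &nrhs, a, &n, ipiv, b, &n, &info);
*/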
int BLASFUNC(spotf2)(char *, int *, float *, int *, int *);
int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *);
int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *);
int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *);
int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *);
int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *);
int BLASFUNC(spotrf)(char *, int *, float *, int *, int *);
int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *);
int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *);
int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *);
int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *);
int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *);
int BLASFUNC(slauu2)(char *, int *, float *, int *, int *);
int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *);
int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *);
int BLASFUNC(clauu2)(char *, int *, float *, int *, int *);
int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *);
int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *);
int BLASFUNC(slauum)(char *, int *, float *, int *, int *);
int BLASFUNC(dlauum)(char *, int *, double *, int *, int *);
int BLASFUNC(qlauum)(char *, int *, double *, int *, int *);
int BLASFUNC(clauum)(char *, int *, float *, int *, int *);
int BLASFUNC(zlauum)(char *, int *, double *, int *, int *);
int BLASFUNC(xlauum)(char *, int *, double *, int *, int *);
int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *);
int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *);
int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *);
int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *);
int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *);
int BLASFUNC(spotri)(char *, int *, float *, int *, int *);
int BLASFUNC(dpotri)(char *, int *, double *, int *, int *);
int BLASFUNC(qpotri)(char *, int *, double *, int *, int *);
int BLASFUNC(cpotri)(char *, int *, float *, int *, int *);
int BLASFUNC(zpotri)(char *, int *, double *, int *, int *);
int BLASFUNC(xpotri)(char *, int *, double *, int *, int *);
#ifdef __cplusplus
}
#endif
#endif
| 7,834 | 50.20915 | 161 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/lapacke.h
|
/*****************************************************************************
Copyright (c) 2010, Intel Corp.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************
* Contents: Native C interface to LAPACK
* Author: Intel Corporation
* Generated November, 2011
*****************************************************************************/
#ifndef _MKL_LAPACKE_H_
#ifndef _LAPACKE_H_
#define _LAPACKE_H_
/*
* Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes
*/
#ifdef HAVE_LAPACK_CONFIG_H
#include "lapacke_config.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#include <stdlib.h>
#ifndef lapack_int
#define lapack_int int
#endif
#ifndef lapack_logical
#define lapack_logical lapack_int
#endif
/* Complex types are structures equivalent to the
* Fortran complex types COMPLEX(4) and COMPLEX(8).
*
 * One can also redefine these types with custom ones,
 * for example by including in the code definitions like
*
* #define lapack_complex_float std::complex<float>
* #define lapack_complex_double std::complex<double>
*
 * or by defining these types on the command line:
*
* -Dlapack_complex_float="std::complex<float>"
* -Dlapack_complex_double="std::complex<double>"
*/
#ifndef LAPACK_COMPLEX_CUSTOM
/* Complex type (single precision) */
#ifndef lapack_complex_float
#include <complex.h>
#define lapack_complex_float float _Complex
#endif
#ifndef lapack_complex_float_real
#define lapack_complex_float_real(z) (creal(z))
#endif
#ifndef lapack_complex_float_imag
#define lapack_complex_float_imag(z) (cimag(z))
#endif
lapack_complex_float lapack_make_complex_float( float re, float im );
/* Complex type (double precision) */
#ifndef lapack_complex_double
#include <complex.h>
#define lapack_complex_double double _Complex
#endif
#ifndef lapack_complex_double_real
#define lapack_complex_double_real(z) (creal(z))
#endif
#ifndef lapack_complex_double_imag
#define lapack_complex_double_imag(z) (cimag(z))
#endif
lapack_complex_double lapack_make_complex_double( double re, double im );
#endif
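/* A small sketch (illustrative): with the default C99 complex types above,
 * values can be built portably through the helper constructors declared
 * here, e.g.
 *
 *   lapack_complex_double z  = lapack_make_complex_double( 1.0, -2.0 );
 *   double                re = lapack_complex_double_real( z );  // 1.0
 *   double                im = lapack_complex_double_imag( z );  // -2.0
 *
 * From C++ one would typically compile with
 * -Dlapack_complex_double="std::complex<double>" instead, as noted above. */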
#ifndef LAPACKE_malloc
#define LAPACKE_malloc( size ) malloc( size )
#endif
#ifndef LAPACKE_free
#define LAPACKE_free( p ) free( p )
#endif
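/* Because both macros are guarded by #ifndef, the library's temporary
 * work-space allocations can be redirected by defining them before this
 * header is included; a hedged sketch, assuming hypothetical
 * my_aligned_malloc/my_aligned_free helpers:
 *
 *   #define LAPACKE_malloc( size ) my_aligned_malloc( size )
 *   #define LAPACKE_free( p )      my_aligned_free( p )
 *   #include "lapacke.h"
 */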
#define LAPACK_C2INT( x ) (lapack_int)(*((float*)&x ))
#define LAPACK_Z2INT( x ) (lapack_int)(*((double*)&x ))
#define LAPACK_ROW_MAJOR 101
#define LAPACK_COL_MAJOR 102
#define LAPACK_WORK_MEMORY_ERROR -1010
#define LAPACK_TRANSPOSE_MEMORY_ERROR -1011
/* Callback logical functions of one, two, or three arguments are used
 * to select eigenvalues to sort to the top left of the Schur form.
 * An eigenvalue is selected if the function returns TRUE (non-zero). */
typedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* );
typedef lapack_logical (*LAPACK_S_SELECT3)
( const float*, const float*, const float* );
typedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* );
typedef lapack_logical (*LAPACK_D_SELECT3)
( const double*, const double*, const double* );
typedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* );
typedef lapack_logical (*LAPACK_C_SELECT2)
( const lapack_complex_float*, const lapack_complex_float* );
typedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* );
typedef lapack_logical (*LAPACK_Z_SELECT2)
( const lapack_complex_double*, const lapack_complex_double* );
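/* A minimal callback sketch (illustrative): with sort = 'S', LAPACKE_dgees
 * (declared below) calls a LAPACK_D_SELECT2 on each eigenvalue (wr, wi) and
 * moves the selected ones to the top left of the Schur form:
 *
 *   lapack_logical select_lhp( const double* wr, const double* wi )
 *   {
 *       (void)wi;          // this selection depends on the real part only
 *       return *wr < 0.0;  // keep left-half-plane eigenvalues
 *   }
 *
 *   // ... LAPACKE_dgees( LAPACK_COL_MAJOR, 'V', 'S', select_lhp, n, a, lda,
 *   //                    &sdim, wr, wi, vs, ldvs );
 */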
#include "lapacke_mangling.h"
#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME)
lapack_logical LAPACK_lsame( char* ca, char* cb,
lapack_int lca, lapack_int lcb );
/* C-LAPACK function prototypes */
lapack_int LAPACKE_sbdsdc( int matrix_order, char uplo, char compq,
lapack_int n, float* d, float* e, float* u,
lapack_int ldu, float* vt, lapack_int ldvt, float* q,
lapack_int* iq );
lapack_int LAPACKE_dbdsdc( int matrix_order, char uplo, char compq,
lapack_int n, double* d, double* e, double* u,
lapack_int ldu, double* vt, lapack_int ldvt,
double* q, lapack_int* iq );
lapack_int LAPACKE_sbdsqr( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
float* d, float* e, float* vt, lapack_int ldvt,
float* u, lapack_int ldu, float* c, lapack_int ldc );
lapack_int LAPACKE_dbdsqr( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
double* d, double* e, double* vt, lapack_int ldvt,
double* u, lapack_int ldu, double* c,
lapack_int ldc );
lapack_int LAPACKE_cbdsqr( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
float* d, float* e, lapack_complex_float* vt,
lapack_int ldvt, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* c,
lapack_int ldc );
lapack_int LAPACKE_zbdsqr( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
double* d, double* e, lapack_complex_double* vt,
lapack_int ldvt, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* c,
lapack_int ldc );
lapack_int LAPACKE_sdisna( char job, lapack_int m, lapack_int n, const float* d,
float* sep );
lapack_int LAPACKE_ddisna( char job, lapack_int m, lapack_int n,
const double* d, double* sep );
lapack_int LAPACKE_sgbbrd( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, float* ab, lapack_int ldab, float* d,
float* e, float* q, lapack_int ldq, float* pt,
lapack_int ldpt, float* c, lapack_int ldc );
lapack_int LAPACKE_dgbbrd( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, double* ab, lapack_int ldab,
double* d, double* e, double* q, lapack_int ldq,
double* pt, lapack_int ldpt, double* c,
lapack_int ldc );
lapack_int LAPACKE_cgbbrd( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, lapack_complex_float* ab,
lapack_int ldab, float* d, float* e,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* pt, lapack_int ldpt,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zgbbrd( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, lapack_complex_double* ab,
lapack_int ldab, double* d, double* e,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* pt, lapack_int ldpt,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_sgbcon( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, const lapack_int* ipiv, float anorm,
float* rcond );
lapack_int LAPACKE_dgbcon( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, const lapack_int* ipiv,
double anorm, double* rcond );
lapack_int LAPACKE_cgbcon( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zgbcon( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab, lapack_int ldab,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_sgbequ( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, float* r, float* c, float* rowcnd,
float* colcnd, float* amax );
lapack_int LAPACKE_dgbequ( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequ( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab, lapack_int ldab,
float* r, float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_zgbequ( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab, lapack_int ldab,
double* r, double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_sgbequb( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, float* r, float* c, float* rowcnd,
float* colcnd, float* amax );
lapack_int LAPACKE_dgbequb( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequb( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab, lapack_int ldab,
float* r, float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_zgbequb( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab, lapack_int ldab,
double* r, double* c, double* rowcnd,
double* colcnd, double* amax );
lapack_int LAPACKE_sgbrfs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const float* ab, lapack_int ldab, const float* afb,
lapack_int ldafb, const lapack_int* ipiv,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_dgbrfs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const double* ab, lapack_int ldab, const double* afb,
lapack_int ldafb, const lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_cgbrfs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* afb, lapack_int ldafb,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zgbrfs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
const lapack_complex_double* afb, lapack_int ldafb,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sgbrfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const float* ab, lapack_int ldab,
const float* afb, lapack_int ldafb,
const lapack_int* ipiv, const float* r,
const float* c, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dgbrfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const double* ab, lapack_int ldab,
const double* afb, lapack_int ldafb,
const lapack_int* ipiv, const double* r,
const double* c, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_cgbrfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const lapack_complex_float* ab,
lapack_int ldab, const lapack_complex_float* afb,
lapack_int ldafb, const lapack_int* ipiv,
const float* r, const float* c,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_zgbrfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const lapack_complex_double* ab,
lapack_int ldab, const lapack_complex_double* afb,
lapack_int ldafb, const lapack_int* ipiv,
const double* r, const double* c,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_sgbsv( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs, float* ab,
lapack_int ldab, lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgbsv( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs, double* ab,
lapack_int ldab, lapack_int* ipiv, double* b,
lapack_int ldb );
lapack_int LAPACKE_cgbsv( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgbsv( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_sgbsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, float* ab, lapack_int ldab,
float* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, float* r, float* c, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* rpivot );
lapack_int LAPACKE_dgbsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, double* ab, lapack_int ldab,
double* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, double* r, double* c, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* rpivot );
lapack_int LAPACKE_cgbsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
float* r, float* c, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, float* rpivot );
lapack_int LAPACKE_zgbsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
double* r, double* c, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, double* rpivot );
lapack_int LAPACKE_sgbsvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, float* ab, lapack_int ldab,
float* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, float* r, float* c, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dgbsvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, double* ab, lapack_int ldab,
double* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, double* r, double* c, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_cgbsvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
float* r, float* c, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_zgbsvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
double* r, double* c, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_sgbtrf( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, float* ab,
lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_dgbtrf( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, double* ab,
lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_cgbtrf( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
lapack_complex_float* ab, lapack_int ldab,
lapack_int* ipiv );
lapack_int LAPACKE_zgbtrf( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
lapack_complex_double* ab, lapack_int ldab,
lapack_int* ipiv );
lapack_int LAPACKE_sgbtrs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const float* ab, lapack_int ldab,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dgbtrs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const double* ab, lapack_int ldab,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_cgbtrs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgbtrs( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_sgebak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const float* scale,
lapack_int m, float* v, lapack_int ldv );
lapack_int LAPACKE_dgebak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const double* scale,
lapack_int m, double* v, lapack_int ldv );
lapack_int LAPACKE_cgebak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const float* scale,
lapack_int m, lapack_complex_float* v,
lapack_int ldv );
lapack_int LAPACKE_zgebak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const double* scale,
lapack_int m, lapack_complex_double* v,
lapack_int ldv );
lapack_int LAPACKE_sgebal( int matrix_order, char job, lapack_int n, float* a,
lapack_int lda, lapack_int* ilo, lapack_int* ihi,
float* scale );
lapack_int LAPACKE_dgebal( int matrix_order, char job, lapack_int n, double* a,
lapack_int lda, lapack_int* ilo, lapack_int* ihi,
double* scale );
lapack_int LAPACKE_cgebal( int matrix_order, char job, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ilo, lapack_int* ihi, float* scale );
lapack_int LAPACKE_zgebal( int matrix_order, char job, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ilo, lapack_int* ihi, double* scale );
lapack_int LAPACKE_sgebrd( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* d, float* e,
float* tauq, float* taup );
lapack_int LAPACKE_dgebrd( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* d, double* e,
double* tauq, double* taup );
lapack_int LAPACKE_cgebrd( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda, float* d,
float* e, lapack_complex_float* tauq,
lapack_complex_float* taup );
lapack_int LAPACKE_zgebrd( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda, double* d,
double* e, lapack_complex_double* tauq,
lapack_complex_double* taup );
lapack_int LAPACKE_sgecon( int matrix_order, char norm, lapack_int n,
const float* a, lapack_int lda, float anorm,
float* rcond );
lapack_int LAPACKE_dgecon( int matrix_order, char norm, lapack_int n,
const double* a, lapack_int lda, double anorm,
double* rcond );
lapack_int LAPACKE_cgecon( int matrix_order, char norm, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float anorm, float* rcond );
lapack_int LAPACKE_zgecon( int matrix_order, char norm, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double anorm, double* rcond );
lapack_int LAPACKE_sgeequ( int matrix_order, lapack_int m, lapack_int n,
const float* a, lapack_int lda, float* r, float* c,
float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgeequ( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, double* r,
double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_cgeequ( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* r, float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_zgeequ( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* r, double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_sgeequb( int matrix_order, lapack_int m, lapack_int n,
const float* a, lapack_int lda, float* r, float* c,
float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgeequb( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, double* r,
double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_cgeequb( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* r, float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_zgeequb( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* r, double* c, double* rowcnd,
double* colcnd, double* amax );
lapack_int LAPACKE_sgees( int matrix_order, char jobvs, char sort,
LAPACK_S_SELECT2 select, lapack_int n, float* a,
lapack_int lda, lapack_int* sdim, float* wr,
float* wi, float* vs, lapack_int ldvs );
lapack_int LAPACKE_dgees( int matrix_order, char jobvs, char sort,
LAPACK_D_SELECT2 select, lapack_int n, double* a,
lapack_int lda, lapack_int* sdim, double* wr,
double* wi, double* vs, lapack_int ldvs );
lapack_int LAPACKE_cgees( int matrix_order, char jobvs, char sort,
LAPACK_C_SELECT1 select, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* sdim, lapack_complex_float* w,
lapack_complex_float* vs, lapack_int ldvs );
lapack_int LAPACKE_zgees( int matrix_order, char jobvs, char sort,
LAPACK_Z_SELECT1 select, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* sdim, lapack_complex_double* w,
lapack_complex_double* vs, lapack_int ldvs );
lapack_int LAPACKE_sgeesx( int matrix_order, char jobvs, char sort,
LAPACK_S_SELECT2 select, char sense, lapack_int n,
float* a, lapack_int lda, lapack_int* sdim,
float* wr, float* wi, float* vs, lapack_int ldvs,
float* rconde, float* rcondv );
lapack_int LAPACKE_dgeesx( int matrix_order, char jobvs, char sort,
LAPACK_D_SELECT2 select, char sense, lapack_int n,
double* a, lapack_int lda, lapack_int* sdim,
double* wr, double* wi, double* vs, lapack_int ldvs,
double* rconde, double* rcondv );
lapack_int LAPACKE_cgeesx( int matrix_order, char jobvs, char sort,
LAPACK_C_SELECT1 select, char sense, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* sdim, lapack_complex_float* w,
lapack_complex_float* vs, lapack_int ldvs,
float* rconde, float* rcondv );
lapack_int LAPACKE_zgeesx( int matrix_order, char jobvs, char sort,
LAPACK_Z_SELECT1 select, char sense, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* sdim, lapack_complex_double* w,
lapack_complex_double* vs, lapack_int ldvs,
double* rconde, double* rcondv );
lapack_int LAPACKE_sgeev( int matrix_order, char jobvl, char jobvr,
lapack_int n, float* a, lapack_int lda, float* wr,
float* wi, float* vl, lapack_int ldvl, float* vr,
lapack_int ldvr );
lapack_int LAPACKE_dgeev( int matrix_order, char jobvl, char jobvr,
lapack_int n, double* a, lapack_int lda, double* wr,
double* wi, double* vl, lapack_int ldvl, double* vr,
lapack_int ldvr );
lapack_int LAPACKE_cgeev( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_float* a, lapack_int lda,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int ldvl, lapack_complex_float* vr,
lapack_int ldvr );
lapack_int LAPACKE_zgeev( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* w,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr );
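/* A minimal sketch for the *geev family (illustrative): eigenvalues and
 * right eigenvectors of a 2x2 column-major matrix. With jobvl = 'N' the left
 * eigenvectors are not referenced, so vl may be NULL (ldvl must still be
 * >= 1):
 *
 *   double a[4] = { 0.0, -1.0, 1.0, 0.0 };   // A = [[0, 1], [-1, 0]]
 *   double wr[2], wi[2], vr[4];
 *   lapack_int info = LAPACKE_dgeev( LAPACK_COL_MAJOR, 'N', 'V', 2, a, 2,
 *                                    wr, wi, NULL, 1, vr, 2 );
 *   // info == 0: eigenvalues are wr[k] + i*wi[k], here +i and -i
 */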
lapack_int LAPACKE_sgeevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, float* a,
lapack_int lda, float* wr, float* wi, float* vl,
lapack_int ldvl, float* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, float* scale,
float* abnrm, float* rconde, float* rcondv );
lapack_int LAPACKE_dgeevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, double* a,
lapack_int lda, double* wr, double* wi, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, double* scale,
double* abnrm, double* rconde, double* rcondv );
lapack_int LAPACKE_cgeevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int ldvl, lapack_complex_float* vr,
lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
float* scale, float* abnrm, float* rconde,
float* rcondv );
lapack_int LAPACKE_zgeevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* w, lapack_complex_double* vl,
lapack_int ldvl, lapack_complex_double* vr,
lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
double* scale, double* abnrm, double* rconde,
double* rcondv );
lapack_int LAPACKE_sgehrd( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, float* a, lapack_int lda,
float* tau );
lapack_int LAPACKE_dgehrd( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, double* a, lapack_int lda,
double* tau );
lapack_int LAPACKE_cgehrd( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* tau );
lapack_int LAPACKE_zgehrd( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* tau );
lapack_int LAPACKE_sgejsv( int matrix_order, char joba, char jobu, char jobv,
char jobr, char jobt, char jobp, lapack_int m,
lapack_int n, float* a, lapack_int lda, float* sva,
float* u, lapack_int ldu, float* v, lapack_int ldv,
float* stat, lapack_int* istat );
lapack_int LAPACKE_dgejsv( int matrix_order, char joba, char jobu, char jobv,
char jobr, char jobt, char jobp, lapack_int m,
lapack_int n, double* a, lapack_int lda, double* sva,
double* u, lapack_int ldu, double* v, lapack_int ldv,
double* stat, lapack_int* istat );
lapack_int LAPACKE_sgelq2( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgelq2( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgelq2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgelq2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgelqf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgelqf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgelqf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgelqf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgels( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* b, lapack_int ldb );
lapack_int LAPACKE_dgels( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* b, lapack_int ldb );
lapack_int LAPACKE_cgels( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zgels( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb );
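/* A minimal sketch for the *gels family (illustrative): least-squares fit of
 * a line y = c0 + c1*t through four points. The design matrix is 4x2 and
 * column-major; on success the first n entries of b hold the coefficients:
 *
 *   double a[8] = { 1.0, 1.0, 1.0, 1.0,      // column 0: intercept
 *                   1.0, 2.0, 3.0, 4.0 };    // column 1: t
 *   double b[4] = { 6.0, 5.0, 7.0, 10.0 };   // observations y
 *   lapack_int info = LAPACKE_dgels( LAPACK_COL_MAJOR, 'N', 4, 2, 1,
 *                                    a, 4, b, 4 );
 *   // info == 0: b[0] = c0 and b[1] = c1 minimize ||A*c - y||_2
 */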
lapack_int LAPACKE_sgelsd( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda, float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank );
lapack_int LAPACKE_dgelsd( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* s, double rcond,
lapack_int* rank );
lapack_int LAPACKE_cgelsd( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank );
lapack_int LAPACKE_zgelsd( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* s, double rcond,
lapack_int* rank );
lapack_int LAPACKE_sgelss( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda, float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank );
lapack_int LAPACKE_dgelss( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* s, double rcond,
lapack_int* rank );
lapack_int LAPACKE_cgelss( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank );
lapack_int LAPACKE_zgelss( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* s, double rcond,
lapack_int* rank );
lapack_int LAPACKE_sgelsy( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda, float* b,
lapack_int ldb, lapack_int* jpvt, float rcond,
lapack_int* rank );
lapack_int LAPACKE_dgelsy( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, lapack_int* jpvt,
double rcond, lapack_int* rank );
lapack_int LAPACKE_cgelsy( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_int* jpvt, float rcond,
lapack_int* rank );
lapack_int LAPACKE_zgelsy( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_int* jpvt, double rcond,
lapack_int* rank );
lapack_int LAPACKE_sgeqlf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgeqlf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgeqlf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgeqlf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgeqp3( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* jpvt,
float* tau );
lapack_int LAPACKE_dgeqp3( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* jpvt,
double* tau );
lapack_int LAPACKE_cgeqp3( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_float* tau );
lapack_int LAPACKE_zgeqp3( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_double* tau );
lapack_int LAPACKE_sgeqpf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* jpvt,
float* tau );
lapack_int LAPACKE_dgeqpf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* jpvt,
double* tau );
lapack_int LAPACKE_cgeqpf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_float* tau );
lapack_int LAPACKE_zgeqpf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_double* tau );
lapack_int LAPACKE_sgeqr2( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgeqr2( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgeqr2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgeqr2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgeqrf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgeqrf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgeqrf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgeqrf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgeqrfp( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgeqrfp( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgeqrfp( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgeqrfp( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgerfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dgerfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_cgerfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zgerfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sgerfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* r,
const float* c, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dgerfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* r,
const double* c, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_cgerfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* r,
const float* c, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_zgerfsx( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* r,
const double* c, const lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_sgerqf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dgerqf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_cgerqf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_zgerqf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_sgesdd( int matrix_order, char jobz, lapack_int m,
lapack_int n, float* a, lapack_int lda, float* s,
float* u, lapack_int ldu, float* vt,
lapack_int ldvt );
lapack_int LAPACKE_dgesdd( int matrix_order, char jobz, lapack_int m,
lapack_int n, double* a, lapack_int lda, double* s,
double* u, lapack_int ldu, double* vt,
lapack_int ldvt );
lapack_int LAPACKE_cgesdd( int matrix_order, char jobz, lapack_int m,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float* s, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* vt,
lapack_int ldvt );
lapack_int LAPACKE_zgesdd( int matrix_order, char jobz, lapack_int m,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double* s, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* vt,
lapack_int ldvt );
lapack_int LAPACKE_sgesv( int matrix_order, lapack_int n, lapack_int nrhs,
float* a, lapack_int lda, lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgesv( int matrix_order, lapack_int n, lapack_int nrhs,
double* a, lapack_int lda, lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cgesv( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgesv( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_dsgesv( int matrix_order, lapack_int n, lapack_int nrhs,
double* a, lapack_int lda, lapack_int* ipiv,
double* b, lapack_int ldb, double* x, lapack_int ldx,
lapack_int* iter );
lapack_int LAPACKE_zcgesv( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, lapack_int* iter );
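/* A minimal sketch for the *gesv family (illustrative): solve A*x = b for a
 * 2x2 column-major system in place; on success b is overwritten with x and a
 * with the LU factors of A:
 *
 *   lapack_int ipiv[2];
 *   double a[4] = { 3.0, 1.0, 1.0, 2.0 };    // A = [[3, 1], [1, 2]]
 *   double b[2] = { 9.0, 8.0 };
 *   lapack_int info = LAPACKE_dgesv( LAPACK_COL_MAJOR, 2, 1, a, 2,
 *                                    ipiv, b, 2 );
 *   // info == 0: b = { 2.0, 3.0 }
 */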
lapack_int LAPACKE_sgesvd( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, float* a, lapack_int lda,
float* s, float* u, lapack_int ldu, float* vt,
lapack_int ldvt, float* superb );
lapack_int LAPACKE_dgesvd( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, double* a,
lapack_int lda, double* s, double* u, lapack_int ldu,
double* vt, lapack_int ldvt, double* superb );
lapack_int LAPACKE_cgesvd( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, lapack_complex_float* a,
lapack_int lda, float* s, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* vt,
lapack_int ldvt, float* superb );
lapack_int LAPACKE_zgesvd( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, lapack_complex_double* a,
lapack_int lda, double* s, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* vt,
lapack_int ldvt, double* superb );
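/* A minimal sketch for the *gesvd family (illustrative): a thin SVD of a 3x2
 * column-major matrix; superb receives the min(m,n)-1 superdiagonal elements
 * that failed to converge (only meaningful when info > 0):
 *
 *   double a[6] = { 1.0, 0.0, 0.0,           // column 0
 *                   0.0, 2.0, 0.0 };         // column 1
 *   double s[2], u[6], vt[4], superb[1];
 *   lapack_int info = LAPACKE_dgesvd( LAPACK_COL_MAJOR, 'S', 'S', 3, 2,
 *                                     a, 3, s, u, 3, vt, 2, superb );
 *   // info == 0: s = { 2.0, 1.0 }, singular values in descending order
 */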
lapack_int LAPACKE_sgesvj( int matrix_order, char joba, char jobu, char jobv,
lapack_int m, lapack_int n, float* a, lapack_int lda,
float* sva, lapack_int mv, float* v, lapack_int ldv,
float* stat );
lapack_int LAPACKE_dgesvj( int matrix_order, char joba, char jobu, char jobv,
lapack_int m, lapack_int n, double* a,
lapack_int lda, double* sva, lapack_int mv,
double* v, lapack_int ldv, double* stat );
lapack_int LAPACKE_sgesvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
float* b, lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* rpivot );
lapack_int LAPACKE_dgesvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* rpivot );
lapack_int LAPACKE_cgesvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* rpivot );
lapack_int LAPACKE_zgesvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* rpivot );
lapack_int LAPACKE_sgesvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
float* b, lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dgesvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_cgesvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_zgesvxx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_sgetf2( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_dgetf2( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_cgetf2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zgetf2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_sgetrf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_dgetrf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_cgetrf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zgetrf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_sgetri( int matrix_order, lapack_int n, float* a,
lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_dgetri( int matrix_order, lapack_int n, double* a,
lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_cgetri( int matrix_order, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zgetri( int matrix_order, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_sgetrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dgetrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_cgetrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zgetrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
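/* A minimal sketch combining *getrf and *getrs (illustrative): factor A once,
 * then reuse the LU factors and pivot indices for several right-hand sides:
 *
 *   lapack_int ipiv[2];
 *   double a[4] = { 3.0, 1.0, 1.0, 2.0 };    // A, column-major
 *   double b[4] = { 9.0, 8.0,                // right-hand side 1
 *                   4.0, 3.0 };              // right-hand side 2
 *   lapack_int info = LAPACKE_dgetrf( LAPACK_COL_MAJOR, 2, 2, a, 2, ipiv );
 *   if( info == 0 )
 *       info = LAPACKE_dgetrs( LAPACK_COL_MAJOR, 'N', 2, 2, a, 2,
 *                              ipiv, b, 2 );
 *   // info == 0: each column of b now holds the corresponding solution
 */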
lapack_int LAPACKE_sggbak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const float* lscale,
const float* rscale, lapack_int m, float* v,
lapack_int ldv );
lapack_int LAPACKE_dggbak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const double* lscale,
const double* rscale, lapack_int m, double* v,
lapack_int ldv );
lapack_int LAPACKE_cggbak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const float* lscale,
const float* rscale, lapack_int m,
lapack_complex_float* v, lapack_int ldv );
lapack_int LAPACKE_zggbak( int matrix_order, char job, char side, lapack_int n,
lapack_int ilo, lapack_int ihi, const double* lscale,
const double* rscale, lapack_int m,
lapack_complex_double* v, lapack_int ldv );
lapack_int LAPACKE_sggbal( int matrix_order, char job, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale );
lapack_int LAPACKE_dggbal( int matrix_order, char job, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi, double* lscale,
double* rscale );
lapack_int LAPACKE_cggbal( int matrix_order, char job, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale );
lapack_int LAPACKE_zggbal( int matrix_order, char job, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi, double* lscale,
double* rscale );
lapack_int LAPACKE_sgges( int matrix_order, char jobvsl, char jobvsr, char sort,
LAPACK_S_SELECT3 selctg, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
lapack_int* sdim, float* alphar, float* alphai,
float* beta, float* vsl, lapack_int ldvsl, float* vsr,
lapack_int ldvsr );
lapack_int LAPACKE_dgges( int matrix_order, char jobvsl, char jobvsr, char sort,
LAPACK_D_SELECT3 selctg, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
lapack_int* sdim, double* alphar, double* alphai,
double* beta, double* vsl, lapack_int ldvsl,
double* vsr, lapack_int ldvsr );
lapack_int LAPACKE_cgges( int matrix_order, char jobvsl, char jobvsr, char sort,
LAPACK_C_SELECT2 selctg, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_int* sdim, lapack_complex_float* alpha,
lapack_complex_float* beta, lapack_complex_float* vsl,
lapack_int ldvsl, lapack_complex_float* vsr,
lapack_int ldvsr );
lapack_int LAPACKE_zgges( int matrix_order, char jobvsl, char jobvsr, char sort,
LAPACK_Z_SELECT2 selctg, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_int* sdim, lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int ldvsl,
lapack_complex_double* vsr, lapack_int ldvsr );
lapack_int LAPACKE_sggesx( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_S_SELECT3 selctg, char sense,
lapack_int n, float* a, lapack_int lda, float* b,
lapack_int ldb, lapack_int* sdim, float* alphar,
float* alphai, float* beta, float* vsl,
lapack_int ldvsl, float* vsr, lapack_int ldvsr,
float* rconde, float* rcondv );
lapack_int LAPACKE_dggesx( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_D_SELECT3 selctg, char sense,
lapack_int n, double* a, lapack_int lda, double* b,
lapack_int ldb, lapack_int* sdim, double* alphar,
double* alphai, double* beta, double* vsl,
lapack_int ldvsl, double* vsr, lapack_int ldvsr,
double* rconde, double* rcondv );
lapack_int LAPACKE_cggesx( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_C_SELECT2 selctg, char sense,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_int* sdim,
lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* vsl, lapack_int ldvsl,
lapack_complex_float* vsr, lapack_int ldvsr,
float* rconde, float* rcondv );
lapack_int LAPACKE_zggesx( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_Z_SELECT2 selctg, char sense,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_int* sdim,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int ldvsl,
lapack_complex_double* vsr, lapack_int ldvsr,
double* rconde, double* rcondv );
lapack_int LAPACKE_sggev( int matrix_order, char jobvl, char jobvr,
lapack_int n, float* a, lapack_int lda, float* b,
lapack_int ldb, float* alphar, float* alphai,
float* beta, float* vl, lapack_int ldvl, float* vr,
lapack_int ldvr );
lapack_int LAPACKE_dggev( int matrix_order, char jobvl, char jobvr,
lapack_int n, double* a, lapack_int lda, double* b,
lapack_int ldb, double* alphar, double* alphai,
double* beta, double* vl, lapack_int ldvl, double* vr,
lapack_int ldvr );
lapack_int LAPACKE_cggev( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* alpha,
lapack_complex_float* beta, lapack_complex_float* vl,
lapack_int ldvl, lapack_complex_float* vr,
lapack_int ldvr );
lapack_int LAPACKE_zggev( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr );
lapack_int LAPACKE_sggevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* alphar, float* alphai, float* beta, float* vl,
lapack_int ldvl, float* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale, float* abnrm, float* bbnrm,
float* rconde, float* rcondv );
lapack_int LAPACKE_dggevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* alphar, double* alphai, double* beta,
double* vl, lapack_int ldvl, double* vr,
lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
double* lscale, double* rscale, double* abnrm,
double* bbnrm, double* rconde, double* rcondv );
lapack_int LAPACKE_cggevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* alpha,
lapack_complex_float* beta, lapack_complex_float* vl,
lapack_int ldvl, lapack_complex_float* vr,
lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,
float* lscale, float* rscale, float* abnrm,
float* bbnrm, float* rconde, float* rcondv );
lapack_int LAPACKE_zggevx( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, double* lscale,
double* rscale, double* abnrm, double* bbnrm,
double* rconde, double* rcondv );
lapack_int LAPACKE_sggglm( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, float* a, lapack_int lda, float* b,
lapack_int ldb, float* d, float* x, float* y );
lapack_int LAPACKE_dggglm( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, double* a, lapack_int lda, double* b,
lapack_int ldb, double* d, double* x, double* y );
lapack_int LAPACKE_cggglm( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* d,
lapack_complex_float* x, lapack_complex_float* y );
lapack_int LAPACKE_zggglm( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* d,
lapack_complex_double* x, lapack_complex_double* y );
lapack_int LAPACKE_sgghrd( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
float* a, lapack_int lda, float* b, lapack_int ldb,
float* q, lapack_int ldq, float* z, lapack_int ldz );
lapack_int LAPACKE_dgghrd( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
double* a, lapack_int lda, double* b, lapack_int ldb,
double* q, lapack_int ldq, double* z,
lapack_int ldz );
lapack_int LAPACKE_cgghrd( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zgghrd( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_sgglse( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, float* a, lapack_int lda, float* b,
lapack_int ldb, float* c, float* d, float* x );
lapack_int LAPACKE_dgglse( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, double* a, lapack_int lda, double* b,
lapack_int ldb, double* c, double* d, double* x );
lapack_int LAPACKE_cgglse( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* c,
lapack_complex_float* d, lapack_complex_float* x );
lapack_int LAPACKE_zgglse( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* c,
lapack_complex_double* d, lapack_complex_double* x );
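/* Usage sketch (illustrative addition): LAPACKE_dgglse solves the
 * equality-constrained least squares problem
 *     minimize ||c - A*x||_2  subject to  B*x = d,
 * with A m-by-n and B p-by-n (p <= n <= m+p).  Hypothetical data:
 *
 *     double a[4*3] = { 1,1,1,1,  0,1,2,3,  0,0,1,3 }; // A, 4x3, col-major
 *     double bb[1*3] = { 1, 1, 1 };                    // B: x1+x2+x3 = d1
 *     double c[4] = { 1, 2, 3, 4 };
 *     double d[1] = { 1 };
 *     double x[3];
 *     lapack_int info = LAPACKE_dgglse( LAPACK_COL_MAJOR, 4, 3, 1,
 *                                       a, 4, bb, 1, c, d, x );
 *     // info == 0 on success; x satisfies the constraint exactly
 */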
lapack_int LAPACKE_sggqrf( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, float* a, lapack_int lda, float* taua,
float* b, lapack_int ldb, float* taub );
lapack_int LAPACKE_dggqrf( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, double* a, lapack_int lda,
double* taua, double* b, lapack_int ldb,
double* taub );
lapack_int LAPACKE_cggqrf( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* taua,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* taub );
lapack_int LAPACKE_zggqrf( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* taua,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* taub );
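/* Usage sketch (illustrative addition): LAPACKE_dggqrf computes the
 * generalized QR factorization A = Q*R, B = Q*T*Z of an n-by-m A and an
 * n-by-p B; taua/taub receive the Householder scalars (lengths min(n,m)
 * and min(n,p)).  Fill a and b with data before the call:
 *
 *     double a[4*3], b[4*4];       // A: 4x3, B: 4x4, column-major
 *     double taua[3], taub[4];
 *     lapack_int info = LAPACKE_dggqrf( LAPACK_COL_MAJOR, 4, 3, 4,
 *                                       a, 4, taua, b, 4, taub );
 */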
lapack_int LAPACKE_sggrqf( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, float* a, lapack_int lda, float* taua,
float* b, lapack_int ldb, float* taub );
lapack_int LAPACKE_dggrqf( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, double* a, lapack_int lda,
double* taua, double* b, lapack_int ldb,
double* taub );
lapack_int LAPACKE_cggrqf( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* taua,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* taub );
lapack_int LAPACKE_zggrqf( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* taua,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* taub );
lapack_int LAPACKE_sggsvd( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int n, lapack_int p,
lapack_int* k, lapack_int* l, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* alpha, float* beta, float* u, lapack_int ldu,
float* v, lapack_int ldv, float* q, lapack_int ldq,
lapack_int* iwork );
lapack_int LAPACKE_dggsvd( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int n, lapack_int p,
lapack_int* k, lapack_int* l, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* alpha, double* beta, double* u,
lapack_int ldu, double* v, lapack_int ldv, double* q,
lapack_int ldq, lapack_int* iwork );
lapack_int LAPACKE_cggsvd( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int n, lapack_int p,
lapack_int* k, lapack_int* l,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
float* alpha, float* beta, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* v,
lapack_int ldv, lapack_complex_float* q,
lapack_int ldq, lapack_int* iwork );
lapack_int LAPACKE_zggsvd( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int n, lapack_int p,
lapack_int* k, lapack_int* l,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double* alpha, double* beta,
lapack_complex_double* u, lapack_int ldu,
lapack_complex_double* v, lapack_int ldv,
lapack_complex_double* q, lapack_int ldq,
lapack_int* iwork );
lapack_int LAPACKE_sggsvp( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb, float tola,
float tolb, lapack_int* k, lapack_int* l, float* u,
lapack_int ldu, float* v, lapack_int ldv, float* q,
lapack_int ldq );
lapack_int LAPACKE_dggsvp( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double tola, double tolb, lapack_int* k,
lapack_int* l, double* u, lapack_int ldu, double* v,
lapack_int ldv, double* q, lapack_int ldq );
lapack_int LAPACKE_cggsvp( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb, float tola,
float tolb, lapack_int* k, lapack_int* l,
lapack_complex_float* u, lapack_int ldu,
lapack_complex_float* v, lapack_int ldv,
lapack_complex_float* q, lapack_int ldq );
lapack_int LAPACKE_zggsvp( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double tola, double tolb, lapack_int* k,
lapack_int* l, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* v,
lapack_int ldv, lapack_complex_double* q,
lapack_int ldq );
lapack_int LAPACKE_sgtcon( char norm, lapack_int n, const float* dl,
const float* d, const float* du, const float* du2,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_dgtcon( char norm, lapack_int n, const double* dl,
const double* d, const double* du, const double* du2,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_cgtcon( char norm, lapack_int n,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zgtcon( char norm, lapack_int n,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_sgtrfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* dl, const float* d,
const float* du, const float* dlf, const float* df,
const float* duf, const float* du2,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dgtrfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* dl, const double* d,
const double* du, const double* dlf,
const double* df, const double* duf,
const double* du2, const lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_cgtrfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* dlf,
const lapack_complex_float* df,
const lapack_complex_float* duf,
const lapack_complex_float* du2,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zgtrfs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* dlf,
const lapack_complex_double* df,
const lapack_complex_double* duf,
const lapack_complex_double* du2,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
float* dl, float* d, float* du, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
double* dl, double* d, double* du, double* b,
lapack_int ldb );
lapack_int LAPACKE_cgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_float* dl, lapack_complex_float* d,
lapack_complex_float* du, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgtsv( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* dl, lapack_complex_double* d,
lapack_complex_double* du, lapack_complex_double* b,
lapack_int ldb );
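/* Usage sketch (illustrative addition): the ?gtsv drivers solve a
 * tridiagonal system A*X = B in one call; dl/du have length n-1, d has
 * length n, and all four arrays are overwritten.  Hypothetical 3x3 data:
 *
 *     double dl[2] = { -1, -1 };        // sub-diagonal
 *     double d[3]  = {  4,  4,  4 };    // main diagonal
 *     double du[2] = { -1, -1 };        // super-diagonal
 *     double b[3]  = {  1,  2,  3 };    // right-hand side
 *     lapack_int info = LAPACKE_dgtsv( LAPACK_COL_MAJOR, 3, 1,
 *                                      dl, d, du, b, 3 );
 *     // info == 0 on success; b now holds the solution x
 */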
lapack_int LAPACKE_sgtsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, const float* dl,
const float* d, const float* du, float* dlf,
float* df, float* duf, float* du2, lapack_int* ipiv,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dgtsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, const double* dl,
const double* d, const double* du, double* dlf,
double* df, double* duf, double* du2,
lapack_int* ipiv, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* ferr, double* berr );
lapack_int LAPACKE_cgtsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
lapack_complex_float* dlf, lapack_complex_float* df,
lapack_complex_float* duf, lapack_complex_float* du2,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_zgtsvx( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
lapack_complex_double* dlf,
lapack_complex_double* df,
lapack_complex_double* duf,
lapack_complex_double* du2, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_sgttrf( lapack_int n, float* dl, float* d, float* du,
float* du2, lapack_int* ipiv );
lapack_int LAPACKE_dgttrf( lapack_int n, double* dl, double* d, double* du,
double* du2, lapack_int* ipiv );
lapack_int LAPACKE_cgttrf( lapack_int n, lapack_complex_float* dl,
lapack_complex_float* d, lapack_complex_float* du,
lapack_complex_float* du2, lapack_int* ipiv );
lapack_int LAPACKE_zgttrf( lapack_int n, lapack_complex_double* dl,
lapack_complex_double* d, lapack_complex_double* du,
lapack_complex_double* du2, lapack_int* ipiv );
lapack_int LAPACKE_sgttrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* dl, const float* d,
const float* du, const float* du2,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dgttrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* dl, const double* d,
const double* du, const double* du2,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_cgttrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgttrs( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
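/* Usage sketch (illustrative addition): to solve several right-hand sides
 * against the same tridiagonal matrix, factor once with ?gttrf and then
 * solve repeatedly with ?gttrs (du2 has length n-2).  Hypothetical data:
 *
 *     double dl[2] = { -1, -1 }, d[3] = { 4, 4, 4 }, du[2] = { -1, -1 };
 *     double du2[1];
 *     lapack_int ipiv[3];
 *     lapack_int info = LAPACKE_dgttrf( 3, dl, d, du, du2, ipiv );
 *     double b[3] = { 1, 2, 3 };
 *     info = LAPACKE_dgttrs( LAPACK_COL_MAJOR, 'N', 3, 1,
 *                            dl, d, du, du2, ipiv, b, 3 );
 *     // b now holds the solution; reuse the factorization for new b's
 */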
lapack_int LAPACKE_chbev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, lapack_complex_float* ab,
lapack_int ldab, float* w, lapack_complex_float* z,
lapack_int ldz );
lapack_int LAPACKE_zhbev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, lapack_complex_double* ab,
lapack_int ldab, double* w, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_chbevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, lapack_complex_float* ab,
lapack_int ldab, float* w, lapack_complex_float* z,
lapack_int ldz );
lapack_int LAPACKE_zhbevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, lapack_complex_double* ab,
lapack_int ldab, double* w, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_chbevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* q, lapack_int ldq, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_zhbevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* q, lapack_int ldq, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_chbgst( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* bb, lapack_int ldbb,
lapack_complex_float* x, lapack_int ldx );
lapack_int LAPACKE_zhbgst( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
const lapack_complex_double* bb, lapack_int ldbb,
lapack_complex_double* x, lapack_int ldx );
lapack_int LAPACKE_chbgv( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* bb, lapack_int ldbb, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhbgv( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* bb, lapack_int ldbb, double* w,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* bb, lapack_int ldbb, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* bb, lapack_int ldbb,
double* w, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_chbgvx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* bb, lapack_int ldbb,
lapack_complex_float* q, lapack_int ldq, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_zhbgvx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* bb, lapack_int ldbb,
lapack_complex_double* q, lapack_int ldq, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_chbtrd( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int kd, lapack_complex_float* ab,
lapack_int ldab, float* d, float* e,
lapack_complex_float* q, lapack_int ldq );
lapack_int LAPACKE_zhbtrd( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int kd, lapack_complex_double* ab,
lapack_int ldab, double* d, double* e,
lapack_complex_double* q, lapack_int ldq );
lapack_int LAPACKE_checon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zhecon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_cheequb( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zheequb( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
lapack_int LAPACKE_cheev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda, float* w );
lapack_int LAPACKE_zheev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda, double* w );
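/* Usage sketch (illustrative addition): LAPACKE_zheev computes all
 * eigenvalues (and, with jobz = 'V', eigenvectors) of a Hermitian matrix;
 * with uplo = 'U' only the upper triangle of a is referenced.  A made-up
 * 2x2 example, A = [ 2, 1+i; 1-i, 3 ]:
 *
 *     lapack_complex_double a[4];
 *     a[0] = lapack_make_complex_double( 2.0,  0.0 );  // a(1,1)
 *     a[2] = lapack_make_complex_double( 1.0,  1.0 );  // a(1,2)
 *     a[3] = lapack_make_complex_double( 3.0,  0.0 );  // a(2,2)
 *     // a(2,1) is not referenced for uplo = 'U'
 *     double w[2];
 *     lapack_int info = LAPACKE_zheev( LAPACK_COL_MAJOR, 'V', 'U',
 *                                      2, a, 2, w );
 *     // w holds eigenvalues in ascending order; a holds the eigenvectors
 */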
lapack_int LAPACKE_cheevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda, float* w );
lapack_int LAPACKE_zheevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double* w );
lapack_int LAPACKE_cheevr( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_zheevr( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_cheevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_zheevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_chegst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhegst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_chegv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* w );
lapack_int LAPACKE_zhegv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* w );
lapack_int LAPACKE_chegvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* w );
lapack_int LAPACKE_zhegvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* w );
lapack_int LAPACKE_chegvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_zhegvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_cherfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zherfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_cherfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* s,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_zherfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* s,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_chesv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zhesv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_chesvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* af,
lapack_int ldaf, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zhesvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* af,
lapack_int ldaf, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_chesvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_zhesvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_chetrd( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda, float* d,
float* e, lapack_complex_float* tau );
lapack_int LAPACKE_zhetrd( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda, double* d,
double* e, lapack_complex_double* tau );
lapack_int LAPACKE_chetrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zhetrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_chetri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zhetri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_chetrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zhetrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_chfrk( int matrix_order, char transr, char uplo, char trans,
lapack_int n, lapack_int k, float alpha,
const lapack_complex_float* a, lapack_int lda,
float beta, lapack_complex_float* c );
lapack_int LAPACKE_zhfrk( int matrix_order, char transr, char uplo, char trans,
lapack_int n, lapack_int k, double alpha,
const lapack_complex_double* a, lapack_int lda,
double beta, lapack_complex_double* c );
lapack_int LAPACKE_shgeqz( int matrix_order, char job, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
float* h, lapack_int ldh, float* t, lapack_int ldt,
float* alphar, float* alphai, float* beta, float* q,
lapack_int ldq, float* z, lapack_int ldz );
lapack_int LAPACKE_dhgeqz( int matrix_order, char job, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
double* h, lapack_int ldh, double* t, lapack_int ldt,
double* alphar, double* alphai, double* beta,
double* q, lapack_int ldq, double* z,
lapack_int ldz );
lapack_int LAPACKE_chgeqz( int matrix_order, char job, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_float* h, lapack_int ldh,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* alpha,
lapack_complex_float* beta, lapack_complex_float* q,
lapack_int ldq, lapack_complex_float* z,
lapack_int ldz );
lapack_int LAPACKE_zhgeqz( int matrix_order, char job, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_double* h, lapack_int ldh,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chpcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zhpcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_chpev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_float* ap, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhpev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_double* ap, double* w,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chpevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_float* ap, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhpevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_complex_double* ap, double* w,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chpevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_float* ap, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_zhpevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_complex_double* ap, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_chpgst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_float* ap,
const lapack_complex_float* bp );
lapack_int LAPACKE_zhpgst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_double* ap,
const lapack_complex_double* bp );
lapack_int LAPACKE_chpgv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_float* ap,
lapack_complex_float* bp, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhpgv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_double* ap,
lapack_complex_double* bp, double* w,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chpgvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_float* ap,
lapack_complex_float* bp, float* w,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zhpgvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_double* ap,
lapack_complex_double* bp, double* w,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_chpgvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_float* ap, lapack_complex_float* bp,
float vl, float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_zhpgvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_double* ap, lapack_complex_double* bp,
double vl, double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_chprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zhprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_chpsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhpsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_chpsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
lapack_complex_float* afp, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zhpsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
lapack_complex_double* afp, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_chptrd( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, float* d, float* e,
lapack_complex_float* tau );
lapack_int LAPACKE_zhptrd( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, double* d, double* e,
lapack_complex_double* tau );
lapack_int LAPACKE_chptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, lapack_int* ipiv );
lapack_int LAPACKE_zhptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, lapack_int* ipiv );
lapack_int LAPACKE_chptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, const lapack_int* ipiv );
lapack_int LAPACKE_zhptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, const lapack_int* ipiv );
lapack_int LAPACKE_chptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_shsein( int matrix_order, char job, char eigsrc, char initv,
lapack_logical* select, lapack_int n, const float* h,
lapack_int ldh, float* wr, const float* wi,
float* vl, lapack_int ldvl, float* vr,
lapack_int ldvr, lapack_int mm, lapack_int* m,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_dhsein( int matrix_order, char job, char eigsrc, char initv,
lapack_logical* select, lapack_int n,
const double* h, lapack_int ldh, double* wr,
const double* wi, double* vl, lapack_int ldvl,
double* vr, lapack_int ldvr, lapack_int mm,
lapack_int* m, lapack_int* ifaill,
lapack_int* ifailr );
lapack_int LAPACKE_chsein( int matrix_order, char job, char eigsrc, char initv,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* h, lapack_int ldh,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int ldvl, lapack_complex_float* vr,
lapack_int ldvr, lapack_int mm, lapack_int* m,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_zhsein( int matrix_order, char job, char eigsrc, char initv,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* h, lapack_int ldh,
lapack_complex_double* w, lapack_complex_double* vl,
lapack_int ldvl, lapack_complex_double* vr,
lapack_int ldvr, lapack_int mm, lapack_int* m,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_shseqr( int matrix_order, char job, char compz, lapack_int n,
lapack_int ilo, lapack_int ihi, float* h,
lapack_int ldh, float* wr, float* wi, float* z,
lapack_int ldz );
lapack_int LAPACKE_dhseqr( int matrix_order, char job, char compz, lapack_int n,
lapack_int ilo, lapack_int ihi, double* h,
lapack_int ldh, double* wr, double* wi, double* z,
lapack_int ldz );
lapack_int LAPACKE_chseqr( int matrix_order, char job, char compz, lapack_int n,
lapack_int ilo, lapack_int ihi,
lapack_complex_float* h, lapack_int ldh,
lapack_complex_float* w, lapack_complex_float* z,
lapack_int ldz );
lapack_int LAPACKE_zhseqr( int matrix_order, char job, char compz, lapack_int n,
lapack_int ilo, lapack_int ihi,
lapack_complex_double* h, lapack_int ldh,
lapack_complex_double* w, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_clacgv( lapack_int n, lapack_complex_float* x,
lapack_int incx );
lapack_int LAPACKE_zlacgv( lapack_int n, lapack_complex_double* x,
lapack_int incx );
lapack_int LAPACKE_slacpy( int matrix_order, char uplo, lapack_int m,
lapack_int n, const float* a, lapack_int lda, float* b,
lapack_int ldb );
lapack_int LAPACKE_dlacpy( int matrix_order, char uplo, lapack_int m,
lapack_int n, const double* a, lapack_int lda, double* b,
lapack_int ldb );
lapack_int LAPACKE_clacpy( int matrix_order, char uplo, lapack_int m,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zlacpy( int matrix_order, char uplo, lapack_int m,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_zlag2c( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_float* sa, lapack_int ldsa );
lapack_int LAPACKE_slag2d( int matrix_order, lapack_int m, lapack_int n,
const float* sa, lapack_int ldsa, double* a,
lapack_int lda );
lapack_int LAPACKE_dlag2s( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, float* sa,
lapack_int ldsa );
lapack_int LAPACKE_clag2z( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* sa, lapack_int ldsa,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_slagge( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* d,
float* a, lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_dlagge( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* d,
double* a, lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_clagge( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* d,
lapack_complex_float* a, lapack_int lda,
lapack_int* iseed );
lapack_int LAPACKE_zlagge( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* d,
lapack_complex_double* a, lapack_int lda,
lapack_int* iseed );
float LAPACKE_slamch( char cmach );
double LAPACKE_dlamch( char cmach );
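/* Usage sketch (illustrative addition): ?lamch queries machine parameters
 * by a single character code, e.g. 'E' = relative machine epsilon,
 * 'S' = safe minimum, 'O' = overflow threshold:
 *
 *     double eps   = LAPACKE_dlamch( 'E' );
 *     double sfmin = LAPACKE_dlamch( 'S' );
 */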
float LAPACKE_slange( int matrix_order, char norm, lapack_int m,
lapack_int n, const float* a, lapack_int lda );
double LAPACKE_dlange( int matrix_order, char norm, lapack_int m,
lapack_int n, const double* a, lapack_int lda );
float LAPACKE_clange( int matrix_order, char norm, lapack_int m,
lapack_int n, const lapack_complex_float* a,
lapack_int lda );
double LAPACKE_zlange( int matrix_order, char norm, lapack_int m,
lapack_int n, const lapack_complex_double* a,
lapack_int lda );
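/* Usage sketch (illustrative addition): ?lange returns a matrix norm
 * selected by the norm character: 'M' = max |a(i,j)|, '1' = one-norm,
 * 'I' = infinity-norm, 'F' = Frobenius norm.  Hypothetical 2x3 data:
 *
 *     double a[2*3] = { 1,4,  2,5,  3,6 };   // column-major
 *     double n1 = LAPACKE_dlange( LAPACK_COL_MAJOR, '1', 2, 3, a, 2 );
 *     // n1 == 9.0: the largest column sum (|3| + |6|)
 */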
float LAPACKE_clanhe( int matrix_order, char norm, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda );
double LAPACKE_zlanhe( int matrix_order, char norm, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda );
float LAPACKE_slansy( int matrix_order, char norm, char uplo, lapack_int n,
const float* a, lapack_int lda );
double LAPACKE_dlansy( int matrix_order, char norm, char uplo, lapack_int n,
const double* a, lapack_int lda );
float LAPACKE_clansy( int matrix_order, char norm, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda );
double LAPACKE_zlansy( int matrix_order, char norm, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda );
float LAPACKE_slantr( int matrix_order, char norm, char uplo, char diag,
lapack_int m, lapack_int n, const float* a,
lapack_int lda );
double LAPACKE_dlantr( int matrix_order, char norm, char uplo, char diag,
lapack_int m, lapack_int n, const double* a,
lapack_int lda );
float LAPACKE_clantr( int matrix_order, char norm, char uplo, char diag,
lapack_int m, lapack_int n, const lapack_complex_float* a,
lapack_int lda );
double LAPACKE_zlantr( int matrix_order, char norm, char uplo, char diag,
lapack_int m, lapack_int n, const lapack_complex_double* a,
lapack_int lda );
lapack_int LAPACKE_slarfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, const float* v, lapack_int ldv,
const float* t, lapack_int ldt, float* c,
lapack_int ldc );
lapack_int LAPACKE_dlarfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, const double* v, lapack_int ldv,
const double* t, lapack_int ldt, double* c,
lapack_int ldc );
lapack_int LAPACKE_clarfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, const lapack_complex_float* v,
lapack_int ldv, const lapack_complex_float* t,
lapack_int ldt, lapack_complex_float* c,
lapack_int ldc );
lapack_int LAPACKE_zlarfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, const lapack_complex_double* v,
lapack_int ldv, const lapack_complex_double* t,
lapack_int ldt, lapack_complex_double* c,
lapack_int ldc );
lapack_int LAPACKE_slarfg( lapack_int n, float* alpha, float* x,
lapack_int incx, float* tau );
lapack_int LAPACKE_dlarfg( lapack_int n, double* alpha, double* x,
lapack_int incx, double* tau );
lapack_int LAPACKE_clarfg( lapack_int n, lapack_complex_float* alpha,
lapack_complex_float* x, lapack_int incx,
lapack_complex_float* tau );
lapack_int LAPACKE_zlarfg( lapack_int n, lapack_complex_double* alpha,
lapack_complex_double* x, lapack_int incx,
lapack_complex_double* tau );
lapack_int LAPACKE_slarft( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k, const float* v,
lapack_int ldv, const float* tau, float* t,
lapack_int ldt );
lapack_int LAPACKE_dlarft( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k, const double* v,
lapack_int ldv, const double* tau, double* t,
lapack_int ldt );
lapack_int LAPACKE_clarft( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* tau,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zlarft( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* tau,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_slarfx( int matrix_order, char side, lapack_int m,
lapack_int n, const float* v, float tau, float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_dlarfx( int matrix_order, char side, lapack_int m,
lapack_int n, const double* v, double tau, double* c,
lapack_int ldc, double* work );
lapack_int LAPACKE_clarfx( int matrix_order, char side, lapack_int m,
lapack_int n, const lapack_complex_float* v,
lapack_complex_float tau, lapack_complex_float* c,
lapack_int ldc, lapack_complex_float* work );
lapack_int LAPACKE_zlarfx( int matrix_order, char side, lapack_int m,
lapack_int n, const lapack_complex_double* v,
lapack_complex_double tau, lapack_complex_double* c,
lapack_int ldc, lapack_complex_double* work );
lapack_int LAPACKE_slarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
float* x );
lapack_int LAPACKE_dlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
double* x );
lapack_int LAPACKE_clarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
lapack_complex_float* x );
lapack_int LAPACKE_zlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,
lapack_complex_double* x );
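/* Usage sketch (illustrative addition): ?larnv fills a vector with random
 * numbers; idist selects the distribution (1 = uniform(0,1),
 * 2 = uniform(-1,1), 3 = normal(0,1)) and iseed holds four integers in
 * [0,4095] with iseed[3] odd, updated on exit:
 *
 *     lapack_int iseed[4] = { 0, 0, 0, 1 };
 *     double x[10];
 *     lapack_int info = LAPACKE_dlarnv( 3, iseed, 10, x );
 *     // x now holds 10 standard normal samples; iseed is advanced
 */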
lapack_int LAPACKE_slaset( int matrix_order, char uplo, lapack_int m,
lapack_int n, float alpha, float beta, float* a,
lapack_int lda );
lapack_int LAPACKE_dlaset( int matrix_order, char uplo, lapack_int m,
lapack_int n, double alpha, double beta, double* a,
lapack_int lda );
lapack_int LAPACKE_claset( int matrix_order, char uplo, lapack_int m,
lapack_int n, lapack_complex_float alpha,
lapack_complex_float beta, lapack_complex_float* a,
lapack_int lda );
lapack_int LAPACKE_zlaset( int matrix_order, char uplo, lapack_int m,
lapack_int n, lapack_complex_double alpha,
lapack_complex_double beta, lapack_complex_double* a,
lapack_int lda );
lapack_int LAPACKE_slasrt( char id, lapack_int n, float* d );
lapack_int LAPACKE_dlasrt( char id, lapack_int n, double* d );
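/* Usage sketch (illustrative addition): ?lasrt sorts a real vector in
 * place, 'I' for increasing order or 'D' for decreasing:
 *
 *     double d[5] = { 3, 1, 4, 1, 5 };
 *     lapack_int info = LAPACKE_dlasrt( 'I', 5, d );
 *     // d is now { 1, 1, 3, 4, 5 }
 */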
lapack_int LAPACKE_slaswp( int matrix_order, lapack_int n, float* a,
lapack_int lda, lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_dlaswp( int matrix_order, lapack_int n, double* a,
lapack_int lda, lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_claswp( int matrix_order, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int k1, lapack_int k2, const lapack_int* ipiv,
lapack_int incx );
lapack_int LAPACKE_zlaswp( int matrix_order, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int k1, lapack_int k2, const lapack_int* ipiv,
lapack_int incx );
lapack_int LAPACKE_slatms( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym, float* d,
lapack_int mode, float cond, float dmax,
lapack_int kl, lapack_int ku, char pack, float* a,
lapack_int lda );
lapack_int LAPACKE_dlatms( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym, double* d,
lapack_int mode, double cond, double dmax,
lapack_int kl, lapack_int ku, char pack, double* a,
lapack_int lda );
lapack_int LAPACKE_clatms( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym, float* d,
lapack_int mode, float cond, float dmax,
lapack_int kl, lapack_int ku, char pack,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zlatms( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym, double* d,
lapack_int mode, double cond, double dmax,
lapack_int kl, lapack_int ku, char pack,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_slauum( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda );
lapack_int LAPACKE_dlauum( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda );
lapack_int LAPACKE_clauum( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zlauum( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_sopgtr( int matrix_order, char uplo, lapack_int n,
const float* ap, const float* tau, float* q,
lapack_int ldq );
lapack_int LAPACKE_dopgtr( int matrix_order, char uplo, lapack_int n,
const double* ap, const double* tau, double* q,
lapack_int ldq );
lapack_int LAPACKE_sopmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n, const float* ap,
const float* tau, float* c, lapack_int ldc );
lapack_int LAPACKE_dopmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n, const double* ap,
const double* tau, double* c, lapack_int ldc );
lapack_int LAPACKE_sorgbr( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorgbr( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, double* a,
lapack_int lda, const double* tau );
lapack_int LAPACKE_sorghr( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorghr( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, double* a, lapack_int lda,
const double* tau );
lapack_int LAPACKE_sorglq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorglq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau );
lapack_int LAPACKE_sorgql( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorgql( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau );
lapack_int LAPACKE_sorgqr( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorgqr( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau );
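/* Usage sketch (illustrative addition): ?orgqr expands the Householder
 * representation produced by the QR factorization (LAPACKE_dgeqrf,
 * declared elsewhere in this header) into an explicit orthogonal factor.
 * Fill a with an m-by-n matrix before the calls:
 *
 *     double a[4*3];               // 4x3, column-major, user-supplied
 *     double tau[3];
 *     lapack_int info = LAPACKE_dgeqrf( LAPACK_COL_MAJOR, 4, 3, a, 4, tau );
 *     info = LAPACKE_dorgqr( LAPACK_COL_MAJOR, 4, 3, 3, a, 4, tau );
 *     // a now holds the 4x3 economy-size Q with orthonormal columns
 */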
lapack_int LAPACKE_sorgrq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau );
lapack_int LAPACKE_dorgrq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau );
lapack_int LAPACKE_sorgtr( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, const float* tau );
lapack_int LAPACKE_dorgtr( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda, const double* tau );
lapack_int LAPACKE_sormbr( int matrix_order, char vect, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda, const float* tau,
float* c, lapack_int ldc );
lapack_int LAPACKE_dormbr( int matrix_order, char vect, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda, const double* tau,
double* c, lapack_int ldc );
lapack_int LAPACKE_sormhr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc );
lapack_int LAPACKE_dormhr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc );
lapack_int LAPACKE_sormlq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda, const float* tau,
float* c, lapack_int ldc );
lapack_int LAPACKE_dormlq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda, const double* tau,
double* c, lapack_int ldc );
lapack_int LAPACKE_sormql( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda, const float* tau,
float* c, lapack_int ldc );
lapack_int LAPACKE_dormql( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda, const double* tau,
double* c, lapack_int ldc );
lapack_int LAPACKE_sormqr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda, const float* tau,
float* c, lapack_int ldc );
lapack_int LAPACKE_dormqr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda, const double* tau,
double* c, lapack_int ldc );
lapack_int LAPACKE_sormrq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda, const float* tau,
float* c, lapack_int ldc );
lapack_int LAPACKE_dormrq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda, const double* tau,
double* c, lapack_int ldc );
lapack_int LAPACKE_sormrz( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc );
lapack_int LAPACKE_dormrz( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc );
lapack_int LAPACKE_sormtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n, const float* a,
lapack_int lda, const float* tau, float* c,
lapack_int ldc );
lapack_int LAPACKE_dormtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n, const double* a,
lapack_int lda, const double* tau, double* c,
lapack_int ldc );
lapack_int LAPACKE_spbcon( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const float* ab, lapack_int ldab,
float anorm, float* rcond );
lapack_int LAPACKE_dpbcon( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const double* ab, lapack_int ldab,
double anorm, double* rcond );
lapack_int LAPACKE_cpbcon( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_float* ab,
lapack_int ldab, float anorm, float* rcond );
lapack_int LAPACKE_zpbcon( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_double* ab,
lapack_int ldab, double anorm, double* rcond );
lapack_int LAPACKE_spbequ( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const float* ab, lapack_int ldab,
float* s, float* scond, float* amax );
lapack_int LAPACKE_dpbequ( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const double* ab, lapack_int ldab,
double* s, double* scond, double* amax );
lapack_int LAPACKE_cpbequ( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_float* ab,
lapack_int ldab, float* s, float* scond,
float* amax );
lapack_int LAPACKE_zpbequ( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_double* ab,
lapack_int ldab, double* s, double* scond,
double* amax );
lapack_int LAPACKE_spbrfs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const float* ab,
lapack_int ldab, const float* afb, lapack_int ldafb,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_dpbrfs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const double* ab,
lapack_int ldab, const double* afb, lapack_int ldafb,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_cpbrfs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* afb, lapack_int ldafb,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zpbrfs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
const lapack_complex_double* afb, lapack_int ldafb,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_spbstf( int matrix_order, char uplo, lapack_int n,
lapack_int kb, float* bb, lapack_int ldbb );
lapack_int LAPACKE_dpbstf( int matrix_order, char uplo, lapack_int n,
lapack_int kb, double* bb, lapack_int ldbb );
lapack_int LAPACKE_cpbstf( int matrix_order, char uplo, lapack_int n,
lapack_int kb, lapack_complex_float* bb,
lapack_int ldbb );
lapack_int LAPACKE_zpbstf( int matrix_order, char uplo, lapack_int n,
lapack_int kb, lapack_complex_double* bb,
lapack_int ldbb );
lapack_int LAPACKE_spbsv( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, float* ab,
lapack_int ldab, float* b, lapack_int ldb );
lapack_int LAPACKE_dpbsv( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, double* ab,
lapack_int ldab, double* b, lapack_int ldb );
lapack_int LAPACKE_cpbsv( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpbsv( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* b, lapack_int ldb );
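/* Usage sketch (illustrative addition): ?pbsv solves A*X = B for a
 * symmetric/Hermitian positive definite band matrix stored in LAPACK band
 * format (ldab = kd + 1 for uplo = 'U').  A made-up tridiagonal SPD
 * example with n = 3, kd = 1:
 *
 *     double ab[2*3] = { 0,4,  -1,4,  -1,4 };  // 'U' band storage; ab[0]
 *                                              // is a padding entry
 *     double b[3] = { 1, 2, 3 };
 *     lapack_int info = LAPACKE_dpbsv( LAPACK_COL_MAJOR, 'U', 3, 1, 1,
 *                                      ab, 2, b, 3 );
 *     // info == 0 on success; b holds the solution, ab the Cholesky factor
 */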
lapack_int LAPACKE_spbsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, float* ab,
lapack_int ldab, float* afb, lapack_int ldafb,
char* equed, float* s, float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, double* ab,
lapack_int ldab, double* afb, lapack_int ldafb,
char* equed, double* s, double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* ferr, double* berr );
lapack_int LAPACKE_cpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* afb, lapack_int ldafb,
char* equed, float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_zpbsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* afb, lapack_int ldafb,
char* equed, double* s, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr );
lapack_int LAPACKE_spbtrf( int matrix_order, char uplo, lapack_int n,
lapack_int kd, float* ab, lapack_int ldab );
lapack_int LAPACKE_dpbtrf( int matrix_order, char uplo, lapack_int n,
lapack_int kd, double* ab, lapack_int ldab );
lapack_int LAPACKE_cpbtrf( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_complex_float* ab,
lapack_int ldab );
lapack_int LAPACKE_zpbtrf( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_complex_double* ab,
lapack_int ldab );
lapack_int LAPACKE_spbtrs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const float* ab,
lapack_int ldab, float* b, lapack_int ldb );
lapack_int LAPACKE_dpbtrs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const double* ab,
lapack_int ldab, double* b, lapack_int ldb );
lapack_int LAPACKE_cpbtrs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpbtrs( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_spftrf( int matrix_order, char transr, char uplo,
lapack_int n, float* a );
lapack_int LAPACKE_dpftrf( int matrix_order, char transr, char uplo,
lapack_int n, double* a );
lapack_int LAPACKE_cpftrf( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_float* a );
lapack_int LAPACKE_zpftrf( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_double* a );
lapack_int LAPACKE_spftri( int matrix_order, char transr, char uplo,
lapack_int n, float* a );
lapack_int LAPACKE_dpftri( int matrix_order, char transr, char uplo,
lapack_int n, double* a );
lapack_int LAPACKE_cpftri( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_float* a );
lapack_int LAPACKE_zpftri( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_double* a );
lapack_int LAPACKE_spftrs( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs, const float* a,
float* b, lapack_int ldb );
lapack_int LAPACKE_dpftrs( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs, const double* a,
double* b, lapack_int ldb );
lapack_int LAPACKE_cpftrs( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpftrs( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a,
lapack_complex_double* b, lapack_int ldb );
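/* ?pocon estimates the reciprocal condition number of a positive definite
 * matrix from its ?potrf factor and the previously computed norm anorm.
 * ?poequ and ?poequb compute scale factors s that equilibrate the matrix;
 * the "b" variant restricts the scalings to powers of the radix so that
 * scaling introduces no rounding error. */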
lapack_int LAPACKE_spocon( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float anorm,
float* rcond );
lapack_int LAPACKE_dpocon( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double anorm,
double* rcond );
lapack_int LAPACKE_cpocon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float anorm, float* rcond );
lapack_int LAPACKE_zpocon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double anorm, double* rcond );
lapack_int LAPACKE_spoequ( int matrix_order, lapack_int n, const float* a,
lapack_int lda, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dpoequ( int matrix_order, lapack_int n, const double* a,
lapack_int lda, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cpoequ( int matrix_order, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zpoequ( int matrix_order, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
lapack_int LAPACKE_spoequb( int matrix_order, lapack_int n, const float* a,
lapack_int lda, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dpoequb( int matrix_order, lapack_int n, const double* a,
lapack_int lda, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cpoequb( int matrix_order, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zpoequb( int matrix_order, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
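/* ?porfs improves a computed solution of a positive definite system by
 * iterative refinement and returns forward (ferr) and backward (berr)
 * error bounds; ?porfsx additionally reports normwise and componentwise
 * bounds through err_bnds_norm and err_bnds_comp. */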
lapack_int LAPACKE_sporfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dporfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const double* af, lapack_int ldaf, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_cporfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_zporfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_sporfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af, lapack_int ldaf,
const float* s, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dporfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af, lapack_int ldaf,
const double* s, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_cporfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af, lapack_int ldaf,
const float* s, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_zporfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af, lapack_int ldaf,
const double* s, const lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
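/* ?posv is the simple driver for A*X = B with A symmetric (Hermitian)
 * positive definite: it factors A in place by Cholesky and overwrites b
 * with the solution. The expert drivers ?posvx and ?posvxx further below
 * add equilibration, condition estimation and error bounds.
 *
 * Illustrative sketch (not part of the original header): a minimal dposv
 * call, assuming the LAPACK_COL_MAJOR layout macro and eliding error
 * handling.
 *
 *   double a[4] = { 4.0, 1.0,      // 2x2 SPD matrix, column-major
 *                   1.0, 3.0 };
 *   double b[2] = { 1.0, 2.0 };    // right-hand side, overwritten with x
 *   lapack_int info = LAPACKE_dposv( LAPACK_COL_MAJOR, 'U', 2, 1,
 *                                    a, 2, b, 2 );
 *   // info == 0 on success; info > 0 means A is not positive definite
 */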
lapack_int LAPACKE_sposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda, float* b,
lapack_int ldb );
lapack_int LAPACKE_dposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda, double* b,
lapack_int ldb );
lapack_int LAPACKE_cposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
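/* dsposv and zcposv are mixed precision drivers: they factor A in single
 * precision and iteratively refine to double precision accuracy, falling
 * back to a full double precision factorization when refinement fails;
 * iter returns the refinement count (negative on fallback). */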
lapack_int LAPACKE_dsposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* x, lapack_int ldx,
lapack_int* iter );
lapack_int LAPACKE_zcposv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, lapack_int* iter );
lapack_int LAPACKE_sposvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda, float* af,
lapack_int ldaf, char* equed, float* s, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_dposvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* af, lapack_int ldaf, char* equed, double* s,
double* b, lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_cposvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* af,
lapack_int ldaf, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zposvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* af,
lapack_int ldaf, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_sposvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
char* equed, float* s, float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_dposvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
char* equed, double* s, double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
lapack_int LAPACKE_cposvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
char* equed, float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_zposvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
char* equed, double* s, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
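/* ?potrf, ?potri and ?potrs are the computational routines behind ?posv:
 * Cholesky factorization, inversion from the factor, and solve with the
 * factor.
 *
 * Illustrative sketch (not part of the original header): factor once with
 * dpotrf and solve with dpotrs, column-major, error checks elided.
 *
 *   double a[9] = { 4.0, 1.0, 1.0,   // 3x3 SPD matrix, column-major
 *                   1.0, 3.0, 0.0,
 *                   1.0, 0.0, 2.0 };
 *   double b[3] = { 1.0, 2.0, 3.0 };
 *   lapack_int info = LAPACKE_dpotrf( LAPACK_COL_MAJOR, 'L', 3, a, 3 );
 *   if( info == 0 )
 *       info = LAPACKE_dpotrs( LAPACK_COL_MAJOR, 'L', 3, 1, a, 3, b, 3 );
 */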
lapack_int LAPACKE_spotrf( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda );
lapack_int LAPACKE_dpotrf( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda );
lapack_int LAPACKE_cpotrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zpotrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_spotri( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda );
lapack_int LAPACKE_dpotri( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda );
lapack_int LAPACKE_cpotri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zpotri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_spotrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
float* b, lapack_int ldb );
lapack_int LAPACKE_dpotrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
double* b, lapack_int ldb );
lapack_int LAPACKE_cpotrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zpotrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
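/* The ?pp* routines mirror the ?po* family for positive definite matrices
 * in packed storage: ap holds one triangle column by column in n*(n+1)/2
 * elements, so no leading dimension argument is needed. */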
lapack_int LAPACKE_sppcon( int matrix_order, char uplo, lapack_int n,
const float* ap, float anorm, float* rcond );
lapack_int LAPACKE_dppcon( int matrix_order, char uplo, lapack_int n,
const double* ap, double anorm, double* rcond );
lapack_int LAPACKE_cppcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap, float anorm,
float* rcond );
lapack_int LAPACKE_zppcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap, double anorm,
double* rcond );
lapack_int LAPACKE_sppequ( int matrix_order, char uplo, lapack_int n,
const float* ap, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dppequ( int matrix_order, char uplo, lapack_int n,
const double* ap, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cppequ( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap, float* s,
float* scond, float* amax );
lapack_int LAPACKE_zppequ( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap, double* s,
double* scond, double* amax );
lapack_int LAPACKE_spprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap, const float* afp,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_dpprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap, const double* afp,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_cpprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zpprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sppsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* ap, float* b,
lapack_int ldb );
lapack_int LAPACKE_dppsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* ap, double* b,
lapack_int ldb );
lapack_int LAPACKE_cppsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zppsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_sppsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, float* ap, float* afp, char* equed,
float* s, float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dppsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, double* ap, double* afp,
char* equed, double* s, double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* ferr, double* berr );
lapack_int LAPACKE_cppsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_complex_float* afp, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zppsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_complex_double* afp, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_spptrf( int matrix_order, char uplo, lapack_int n,
float* ap );
lapack_int LAPACKE_dpptrf( int matrix_order, char uplo, lapack_int n,
double* ap );
lapack_int LAPACKE_cpptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap );
lapack_int LAPACKE_zpptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap );
lapack_int LAPACKE_spptri( int matrix_order, char uplo, lapack_int n,
float* ap );
lapack_int LAPACKE_dpptri( int matrix_order, char uplo, lapack_int n,
double* ap );
lapack_int LAPACKE_cpptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap );
lapack_int LAPACKE_zpptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap );
lapack_int LAPACKE_spptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap, float* b,
lapack_int ldb );
lapack_int LAPACKE_dpptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap, double* b,
lapack_int ldb );
lapack_int LAPACKE_cpptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
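/* ?pstrf computes a Cholesky factorization with complete (diagonal)
 * pivoting, P**T*A*P = U**T*U or L*L**T. It is rank revealing and hence
 * applicable to positive semidefinite matrices: piv returns the
 * permutation and rank the numerical rank determined by tol. */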
lapack_int LAPACKE_spstrf( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, lapack_int* piv, lapack_int* rank,
float tol );
lapack_int LAPACKE_dpstrf( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda, lapack_int* piv, lapack_int* rank,
double tol );
lapack_int LAPACKE_cpstrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* piv, lapack_int* rank, float tol );
lapack_int LAPACKE_zpstrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* piv, lapack_int* rank, double tol );
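/* The ?pt* routines handle symmetric (Hermitian) positive definite
 * tridiagonal matrices given by the diagonal d and off-diagonal e:
 * condition estimation, eigendecomposition (?pteqr), refinement, simple
 * and expert solvers, and the L*D*L**T factorization (?pttrf / ?pttrs).
 *
 * Illustrative sketch (not part of the original header), error handling
 * elided:
 *
 *   double d[3] = { 2.0, 2.0, 2.0 };   // diagonal
 *   double e[2] = { -1.0, -1.0 };      // off-diagonal
 *   double b[3] = { 1.0, 0.0, 1.0 };   // rhs, overwritten with solution
 *   lapack_int info = LAPACKE_dptsv( LAPACK_COL_MAJOR, 3, 1, d, e, b, 3 );
 */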
lapack_int LAPACKE_sptcon( lapack_int n, const float* d, const float* e,
float anorm, float* rcond );
lapack_int LAPACKE_dptcon( lapack_int n, const double* d, const double* e,
double anorm, double* rcond );
lapack_int LAPACKE_cptcon( lapack_int n, const float* d,
const lapack_complex_float* e, float anorm,
float* rcond );
lapack_int LAPACKE_zptcon( lapack_int n, const double* d,
const lapack_complex_double* e, double anorm,
double* rcond );
lapack_int LAPACKE_spteqr( int matrix_order, char compz, lapack_int n, float* d,
float* e, float* z, lapack_int ldz );
lapack_int LAPACKE_dpteqr( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz );
lapack_int LAPACKE_cpteqr( int matrix_order, char compz, lapack_int n, float* d,
float* e, lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zpteqr( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_sptrfs( int matrix_order, lapack_int n, lapack_int nrhs,
const float* d, const float* e, const float* df,
const float* ef, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_dptrfs( int matrix_order, lapack_int n, lapack_int nrhs,
const double* d, const double* e, const double* df,
const double* ef, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* ferr,
double* berr );
lapack_int LAPACKE_cptrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e, const float* df,
const lapack_complex_float* ef,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zptrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e, const double* df,
const lapack_complex_double* ef,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sptsv( int matrix_order, lapack_int n, lapack_int nrhs,
float* d, float* e, float* b, lapack_int ldb );
lapack_int LAPACKE_dptsv( int matrix_order, lapack_int n, lapack_int nrhs,
double* d, double* e, double* b, lapack_int ldb );
lapack_int LAPACKE_cptsv( int matrix_order, lapack_int n, lapack_int nrhs,
float* d, lapack_complex_float* e,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zptsv( int matrix_order, lapack_int n, lapack_int nrhs,
double* d, lapack_complex_double* e,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_sptsvx( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const float* d, const float* e,
float* df, float* ef, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dptsvx( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const double* d, const double* e,
double* df, double* ef, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_cptsvx( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e, float* df,
lapack_complex_float* ef,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zptsvx( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e, double* df,
lapack_complex_double* ef,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_spttrf( lapack_int n, float* d, float* e );
lapack_int LAPACKE_dpttrf( lapack_int n, double* d, double* e );
lapack_int LAPACKE_cpttrf( lapack_int n, float* d, lapack_complex_float* e );
lapack_int LAPACKE_zpttrf( lapack_int n, double* d, lapack_complex_double* e );
lapack_int LAPACKE_spttrs( int matrix_order, lapack_int n, lapack_int nrhs,
const float* d, const float* e, float* b,
lapack_int ldb );
lapack_int LAPACKE_dpttrs( int matrix_order, lapack_int n, lapack_int nrhs,
const double* d, const double* e, double* b,
lapack_int ldb );
lapack_int LAPACKE_cpttrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpttrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e,
lapack_complex_double* b, lapack_int ldb );
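/* ?sbev, ?sbevd and ?sbevx solve the real symmetric band eigenproblem
 * (QR, divide and conquer, and selective expert variants). ?sbgst reduces
 * a generalized banded problem A*x = lambda*B*x to standard form using the
 * split Cholesky factor bb of B, ?sbgv* are the corresponding generalized
 * drivers, and ?sbtrd reduces a band matrix to tridiagonal form. */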
lapack_int LAPACKE_ssbev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, float* ab, lapack_int ldab, float* w,
float* z, lapack_int ldz );
lapack_int LAPACKE_dsbev( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, double* ab, lapack_int ldab, double* w,
double* z, lapack_int ldz );
lapack_int LAPACKE_ssbevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, float* ab, lapack_int ldab, float* w,
float* z, lapack_int ldz );
lapack_int LAPACKE_dsbevd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int kd, double* ab, lapack_int ldab,
double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_ssbevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int kd, float* ab,
lapack_int ldab, float* q, lapack_int ldq, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dsbevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int kd, double* ab,
lapack_int ldab, double* q, lapack_int ldq,
double vl, double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_ssbgst( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, float* ab,
lapack_int ldab, const float* bb, lapack_int ldbb,
float* x, lapack_int ldx );
lapack_int LAPACKE_dsbgst( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, double* ab,
lapack_int ldab, const double* bb, lapack_int ldbb,
double* x, lapack_int ldx );
lapack_int LAPACKE_ssbgv( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, float* ab,
lapack_int ldab, float* bb, lapack_int ldbb, float* w,
float* z, lapack_int ldz );
lapack_int LAPACKE_dsbgv( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, double* ab,
lapack_int ldab, double* bb, lapack_int ldbb,
double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_ssbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, float* ab,
lapack_int ldab, float* bb, lapack_int ldbb,
float* w, float* z, lapack_int ldz );
lapack_int LAPACKE_dsbgvd( int matrix_order, char jobz, char uplo, lapack_int n,
lapack_int ka, lapack_int kb, double* ab,
lapack_int ldab, double* bb, lapack_int ldbb,
double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_ssbgvx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
float* ab, lapack_int ldab, float* bb,
lapack_int ldbb, float* q, lapack_int ldq, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dsbgvx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
double* ab, lapack_int ldab, double* bb,
lapack_int ldbb, double* q, lapack_int ldq,
double vl, double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_ssbtrd( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int kd, float* ab, lapack_int ldab, float* d,
float* e, float* q, lapack_int ldq );
lapack_int LAPACKE_dsbtrd( int matrix_order, char vect, char uplo, lapack_int n,
lapack_int kd, double* ab, lapack_int ldab,
double* d, double* e, double* q, lapack_int ldq );
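/* ?sfrk performs a symmetric rank-k update, C := alpha*A*A**T + beta*C or
 * C := alpha*A**T*A + beta*C, for C stored in RFP format. */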
lapack_int LAPACKE_ssfrk( int matrix_order, char transr, char uplo, char trans,
lapack_int n, lapack_int k, float alpha,
const float* a, lapack_int lda, float beta,
float* c );
lapack_int LAPACKE_dsfrk( int matrix_order, char transr, char uplo, char trans,
lapack_int n, lapack_int k, double alpha,
const double* a, lapack_int lda, double beta,
double* c );
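/* The ?sp* routines are the packed storage counterparts of the ?sy*
 * family: condition estimation, standard and generalized eigenproblem
 * drivers, iterative refinement, Bunch-Kaufman based solvers (?spsv,
 * ?spsvx), reduction to tridiagonal form (?sptrd), and the factor /
 * invert / solve computational routines (?sptrf, ?sptri, ?sptrs). */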
lapack_int LAPACKE_sspcon( int matrix_order, char uplo, lapack_int n,
const float* ap, const lapack_int* ipiv, float anorm,
float* rcond );
lapack_int LAPACKE_dspcon( int matrix_order, char uplo, lapack_int n,
const double* ap, const lapack_int* ipiv,
double anorm, double* rcond );
lapack_int LAPACKE_cspcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zspcon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_sspev( int matrix_order, char jobz, char uplo, lapack_int n,
float* ap, float* w, float* z, lapack_int ldz );
lapack_int LAPACKE_dspev( int matrix_order, char jobz, char uplo, lapack_int n,
double* ap, double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_sspevd( int matrix_order, char jobz, char uplo, lapack_int n,
float* ap, float* w, float* z, lapack_int ldz );
lapack_int LAPACKE_dspevd( int matrix_order, char jobz, char uplo, lapack_int n,
double* ap, double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_sspevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, float* ap, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dspevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, double* ap, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w, double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_sspgst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, float* ap, const float* bp );
lapack_int LAPACKE_dspgst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, double* ap, const double* bp );
lapack_int LAPACKE_sspgv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* ap, float* bp,
float* w, float* z, lapack_int ldz );
lapack_int LAPACKE_dspgv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* ap, double* bp,
double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_sspgvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* ap, float* bp,
float* w, float* z, lapack_int ldz );
lapack_int LAPACKE_dspgvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* ap, double* bp,
double* w, double* z, lapack_int ldz );
lapack_int LAPACKE_sspgvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, float* ap,
float* bp, float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m, float* w,
float* z, lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_dspgvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, double* ap,
double* bp, double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, double* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_ssprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap, const float* afp,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dsprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap, const double* afp,
const lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_csprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zsprfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_sspsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* ap, lapack_int* ipiv,
float* b, lapack_int ldb );
lapack_int LAPACKE_dspsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* ap, lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cspsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zspsv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_sspsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const float* ap, float* afp,
lapack_int* ipiv, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dspsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const double* ap, double* afp,
lapack_int* ipiv, const double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* ferr, double* berr );
lapack_int LAPACKE_cspsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
lapack_complex_float* afp, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zspsvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
lapack_complex_double* afp, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_ssptrd( int matrix_order, char uplo, lapack_int n, float* ap,
float* d, float* e, float* tau );
lapack_int LAPACKE_dsptrd( int matrix_order, char uplo, lapack_int n,
double* ap, double* d, double* e, double* tau );
lapack_int LAPACKE_ssptrf( int matrix_order, char uplo, lapack_int n, float* ap,
lapack_int* ipiv );
lapack_int LAPACKE_dsptrf( int matrix_order, char uplo, lapack_int n,
double* ap, lapack_int* ipiv );
lapack_int LAPACKE_csptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, lapack_int* ipiv );
lapack_int LAPACKE_zsptrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, lapack_int* ipiv );
lapack_int LAPACKE_ssptri( int matrix_order, char uplo, lapack_int n, float* ap,
const lapack_int* ipiv );
lapack_int LAPACKE_dsptri( int matrix_order, char uplo, lapack_int n,
double* ap, const lapack_int* ipiv );
lapack_int LAPACKE_csptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, const lapack_int* ipiv );
lapack_int LAPACKE_zsptri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, const lapack_int* ipiv );
lapack_int LAPACKE_ssptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dsptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_csptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zsptrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* ap,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
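/* Symmetric tridiagonal eigensolver kernels: ?stebz (bisection,
 * eigenvalues only), ?stedc (divide and conquer), ?stegr and ?stemr
 * (MRRR), ?stein (inverse iteration for selected eigenvectors), ?steqr
 * (implicit QL/QR), and ?sterf (a QL/QR variant computing eigenvalues
 * only). */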
lapack_int LAPACKE_sstebz( char range, char order, lapack_int n, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
const float* d, const float* e, lapack_int* m,
lapack_int* nsplit, float* w, lapack_int* iblock,
lapack_int* isplit );
lapack_int LAPACKE_dstebz( char range, char order, lapack_int n, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, const double* d, const double* e,
lapack_int* m, lapack_int* nsplit, double* w,
lapack_int* iblock, lapack_int* isplit );
lapack_int LAPACKE_sstedc( int matrix_order, char compz, lapack_int n, float* d,
float* e, float* z, lapack_int ldz );
lapack_int LAPACKE_dstedc( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz );
lapack_int LAPACKE_cstedc( int matrix_order, char compz, lapack_int n, float* d,
float* e, lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zstedc( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_sstegr( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_dstegr( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* isuppz );
lapack_int LAPACKE_cstegr( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* isuppz );
lapack_int LAPACKE_zstegr( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_sstein( int matrix_order, lapack_int n, const float* d,
const float* e, lapack_int m, const float* w,
const lapack_int* iblock, const lapack_int* isplit,
float* z, lapack_int ldz, lapack_int* ifailv );
lapack_int LAPACKE_dstein( int matrix_order, lapack_int n, const double* d,
const double* e, lapack_int m, const double* w,
const lapack_int* iblock, const lapack_int* isplit,
double* z, lapack_int ldz, lapack_int* ifailv );
lapack_int LAPACKE_cstein( int matrix_order, lapack_int n, const float* d,
const float* e, lapack_int m, const float* w,
const lapack_int* iblock, const lapack_int* isplit,
lapack_complex_float* z, lapack_int ldz,
lapack_int* ifailv );
lapack_int LAPACKE_zstein( int matrix_order, lapack_int n, const double* d,
const double* e, lapack_int m, const double* w,
const lapack_int* iblock, const lapack_int* isplit,
lapack_complex_double* z, lapack_int ldz,
lapack_int* ifailv );
lapack_int LAPACKE_sstemr( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, lapack_int* m,
float* w, float* z, lapack_int ldz, lapack_int nzc,
lapack_int* isuppz, lapack_logical* tryrac );
lapack_int LAPACKE_dstemr( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
lapack_int* m, double* w, double* z, lapack_int ldz,
lapack_int nzc, lapack_int* isuppz,
lapack_logical* tryrac );
lapack_int LAPACKE_cstemr( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, lapack_int* m,
float* w, lapack_complex_float* z, lapack_int ldz,
lapack_int nzc, lapack_int* isuppz,
lapack_logical* tryrac );
lapack_int LAPACKE_zstemr( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int ldz, lapack_int nzc, lapack_int* isuppz,
lapack_logical* tryrac );
lapack_int LAPACKE_ssteqr( int matrix_order, char compz, lapack_int n, float* d,
float* e, float* z, lapack_int ldz );
lapack_int LAPACKE_dsteqr( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz );
lapack_int LAPACKE_csteqr( int matrix_order, char compz, lapack_int n, float* d,
float* e, lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zsteqr( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz );
lapack_int LAPACKE_ssterf( lapack_int n, float* d, float* e );
lapack_int LAPACKE_dsterf( lapack_int n, double* d, double* e );
lapack_int LAPACKE_sstev( int matrix_order, char jobz, lapack_int n, float* d,
float* e, float* z, lapack_int ldz );
lapack_int LAPACKE_dstev( int matrix_order, char jobz, lapack_int n, double* d,
double* e, double* z, lapack_int ldz );
lapack_int LAPACKE_sstevd( int matrix_order, char jobz, lapack_int n, float* d,
float* e, float* z, lapack_int ldz );
lapack_int LAPACKE_dstevd( int matrix_order, char jobz, lapack_int n, double* d,
double* e, double* z, lapack_int ldz );
lapack_int LAPACKE_sstevr( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_dstevr( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* isuppz );
lapack_int LAPACKE_sstevx( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dstevx( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* ifail );
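/* The ?sy* routines operate on symmetric matrices in full storage:
 * ?sycon and ?syequb for conditioning and equilibration, the ?syev* and
 * generalized ?sygv* eigenproblem drivers, followed further below by the
 * symmetric indefinite solver chain.
 *
 * Illustrative sketch (not part of the original header): all eigenvalues
 * and eigenvectors of a symmetric matrix with dsyev, column-major, error
 * handling elided.
 *
 *   double a[4] = { 2.0, 1.0,
 *                   1.0, 2.0 };   // overwritten with eigenvectors
 *   double w[2];                  // eigenvalues in ascending order
 *   lapack_int info = LAPACKE_dsyev( LAPACK_COL_MAJOR, 'V', 'U',
 *                                    2, a, 2, w );
 */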
lapack_int LAPACKE_ssycon( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_dsycon( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_csycon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, float anorm, float* rcond );
lapack_int LAPACKE_zsycon( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond );
lapack_int LAPACKE_ssyequb( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float* s,
float* scond, float* amax );
lapack_int LAPACKE_dsyequb( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double* s,
double* scond, double* amax );
lapack_int LAPACKE_csyequb( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zsyequb( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
lapack_int LAPACKE_ssyev( int matrix_order, char jobz, char uplo, lapack_int n,
float* a, lapack_int lda, float* w );
lapack_int LAPACKE_dsyev( int matrix_order, char jobz, char uplo, lapack_int n,
double* a, lapack_int lda, double* w );
lapack_int LAPACKE_ssyevd( int matrix_order, char jobz, char uplo, lapack_int n,
float* a, lapack_int lda, float* w );
lapack_int LAPACKE_dsyevd( int matrix_order, char jobz, char uplo, lapack_int n,
double* a, lapack_int lda, double* w );
lapack_int LAPACKE_ssyevr( int matrix_order, char jobz, char range, char uplo,
lapack_int n, float* a, lapack_int lda, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* isuppz );
lapack_int LAPACKE_dsyevr( int matrix_order, char jobz, char range, char uplo,
lapack_int n, double* a, lapack_int lda, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* isuppz );
lapack_int LAPACKE_ssyevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, float* a, lapack_int lda, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dsyevx( int matrix_order, char jobz, char range, char uplo,
lapack_int n, double* a, lapack_int lda, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_ssygst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, float* a, lapack_int lda,
const float* b, lapack_int ldb );
lapack_int LAPACKE_dsygst( int matrix_order, lapack_int itype, char uplo,
lapack_int n, double* a, lapack_int lda,
const double* b, lapack_int ldb );
lapack_int LAPACKE_ssygv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* a, lapack_int lda,
float* b, lapack_int ldb, float* w );
lapack_int LAPACKE_dsygv( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* a, lapack_int lda,
double* b, lapack_int ldb, double* w );
lapack_int LAPACKE_ssygvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* a, lapack_int lda,
float* b, lapack_int ldb, float* w );
lapack_int LAPACKE_dsygvd( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* a, lapack_int lda,
double* b, lapack_int ldb, double* w );
lapack_int LAPACKE_ssygvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb, float vl,
float vu, lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z, lapack_int ldz,
lapack_int* ifail );
lapack_int LAPACKE_dsygvx( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* ifail );
lapack_int LAPACKE_ssyrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dsyrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_csyrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_zsyrfs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_ssyrfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* s,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dsyrfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* s,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_csyrfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* s,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params );
lapack_int LAPACKE_zsyrfsx( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af, lapack_int ldaf,
const lapack_int* ipiv, const double* s,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params );
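/* ?sysv solves A*X = B for symmetric indefinite A via the Bunch-Kaufman
 * diagonal pivoting factorization computed by ?sytrf; ?sysvx and ?sysvxx
 * are the expert variants with condition estimation and error bounds.
 *
 * Illustrative sketch (not part of the original header), column-major,
 * error handling elided:
 *
 *   double a[4] = { 0.0, 2.0,     // symmetric but indefinite
 *                   2.0, 0.0 };
 *   double b[2] = { 2.0, 4.0 };
 *   lapack_int ipiv[2];
 *   lapack_int info = LAPACKE_dsysv( LAPACK_COL_MAJOR, 'U', 2, 1,
 *                                    a, 2, ipiv, b, 2 );
 */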
lapack_int LAPACKE_ssysv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dsysv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_csysv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zsysv( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_ssysvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
float* af, lapack_int ldaf, lapack_int* ipiv,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr );
lapack_int LAPACKE_dsysvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
double* af, lapack_int ldaf, lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr );
lapack_int LAPACKE_csysvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* af,
lapack_int ldaf, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr );
lapack_int LAPACKE_zsysvx( int matrix_order, char fact, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* af,
lapack_int ldaf, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr );
lapack_int LAPACKE_ssysvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_dsysvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
lapack_int LAPACKE_csysvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params );
lapack_int LAPACKE_zsysvxx( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params );
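/* Computational routines for the ?sy* family: ?sytrd reduces a symmetric
 * matrix to tridiagonal form by an orthogonal similarity transformation,
 * ?sytrf computes the Bunch-Kaufman factorization, and ?sytri / ?sytrs
 * invert and solve using that factorization. */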
lapack_int LAPACKE_ssytrd( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, float* d, float* e, float* tau );
lapack_int LAPACKE_dsytrd( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda, double* d, double* e, double* tau );
lapack_int LAPACKE_ssytrf( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_dsytrf( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_csytrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zsytrf( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_ssytri( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_dsytri( int matrix_order, char uplo, lapack_int n, double* a,
lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_csytri( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zsytri( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_ssytrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_dsytrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_csytrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zsytrs( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
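/* ?tbcon, ?tbrfs and ?tbtrs handle triangular band matrices: condition
 * estimation, error bounds for a computed solution, and the solve
 * itself. */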
lapack_int LAPACKE_stbcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, lapack_int kd, const float* ab,
lapack_int ldab, float* rcond );
lapack_int LAPACKE_dtbcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, lapack_int kd, const double* ab,
lapack_int ldab, double* rcond );
lapack_int LAPACKE_ctbcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, lapack_int kd,
const lapack_complex_float* ab, lapack_int ldab,
float* rcond );
lapack_int LAPACKE_ztbcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, lapack_int kd,
const lapack_complex_double* ab, lapack_int ldab,
double* rcond );
lapack_int LAPACKE_stbrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const float* ab, lapack_int ldab, const float* b,
lapack_int ldb, const float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_dtbrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const double* ab, lapack_int ldab, const double* b,
lapack_int ldb, const double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_ctbrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_ztbrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_stbtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const float* ab, lapack_int ldab, float* b,
lapack_int ldb );
lapack_int LAPACKE_dtbtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const double* ab, lapack_int ldab, double* b,
lapack_int ldb );
lapack_int LAPACKE_ctbtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztbtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* b, lapack_int ldb );
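/* The ?tf* routines work on triangular matrices in RFP format: ?tfsm is
 * the RFP analogue of the BLAS trsm triangular solve, ?tftri inverts an
 * RFP triangular matrix, and ?tfttp / ?tfttr convert from RFP to packed
 * and to full storage. */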
lapack_int LAPACKE_stfsm( int matrix_order, char transr, char side, char uplo,
char trans, char diag, lapack_int m, lapack_int n,
float alpha, const float* a, float* b,
lapack_int ldb );
lapack_int LAPACKE_dtfsm( int matrix_order, char transr, char side, char uplo,
char trans, char diag, lapack_int m, lapack_int n,
double alpha, const double* a, double* b,
lapack_int ldb );
lapack_int LAPACKE_ctfsm( int matrix_order, char transr, char side, char uplo,
char trans, char diag, lapack_int m, lapack_int n,
lapack_complex_float alpha,
const lapack_complex_float* a,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztfsm( int matrix_order, char transr, char side, char uplo,
char trans, char diag, lapack_int m, lapack_int n,
lapack_complex_double alpha,
const lapack_complex_double* a,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_stftri( int matrix_order, char transr, char uplo, char diag,
lapack_int n, float* a );
lapack_int LAPACKE_dtftri( int matrix_order, char transr, char uplo, char diag,
lapack_int n, double* a );
lapack_int LAPACKE_ctftri( int matrix_order, char transr, char uplo, char diag,
lapack_int n, lapack_complex_float* a );
lapack_int LAPACKE_ztftri( int matrix_order, char transr, char uplo, char diag,
lapack_int n, lapack_complex_double* a );
lapack_int LAPACKE_stfttp( int matrix_order, char transr, char uplo,
lapack_int n, const float* arf, float* ap );
lapack_int LAPACKE_dtfttp( int matrix_order, char transr, char uplo,
lapack_int n, const double* arf, double* ap );
lapack_int LAPACKE_ctfttp( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* arf,
lapack_complex_float* ap );
lapack_int LAPACKE_ztfttp( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* arf,
lapack_complex_double* ap );
lapack_int LAPACKE_stfttr( int matrix_order, char transr, char uplo,
lapack_int n, const float* arf, float* a,
lapack_int lda );
lapack_int LAPACKE_dtfttr( int matrix_order, char transr, char uplo,
lapack_int n, const double* arf, double* a,
lapack_int lda );
lapack_int LAPACKE_ctfttr( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* arf,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_ztfttr( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* arf,
lapack_complex_double* a, lapack_int lda );
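/* The ?tg* routines operate on matrix pairs in generalized Schur form:
 * ?tgevc computes left/right generalized eigenvectors, ?tgexc and ?tgsen
 * reorder the generalized Schur decomposition (?tgsen also returning the
 * condition estimates pl, pr and dif), and ?tgsja performs the Jacobi
 * iteration stage of the generalized SVD. */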
lapack_int LAPACKE_stgevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const float* s, lapack_int lds, const float* p,
lapack_int ldp, float* vl, lapack_int ldvl,
float* vr, lapack_int ldvr, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_dtgevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const double* s, lapack_int lds, const double* p,
lapack_int ldp, double* vl, lapack_int ldvl,
double* vr, lapack_int ldvr, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_ctgevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* s, lapack_int lds,
const lapack_complex_float* p, lapack_int ldp,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ztgevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* s, lapack_int lds,
const lapack_complex_double* p, lapack_int ldp,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m );
lapack_int LAPACKE_stgexc( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb, float* q,
lapack_int ldq, float* z, lapack_int ldz,
lapack_int* ifst, lapack_int* ilst );
lapack_int LAPACKE_dtgexc( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb, double* q,
lapack_int ldq, double* z, lapack_int ldz,
lapack_int* ifst, lapack_int* ilst );
lapack_int LAPACKE_ctgexc( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_ztgexc( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_stgsen( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* alphar, float* alphai, float* beta, float* q,
lapack_int ldq, float* z, lapack_int ldz,
lapack_int* m, float* pl, float* pr, float* dif );
lapack_int LAPACKE_dtgsen( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
double* a, lapack_int lda, double* b, lapack_int ldb,
double* alphar, double* alphai, double* beta,
double* q, lapack_int ldq, double* z, lapack_int ldz,
lapack_int* m, double* pl, double* pr, double* dif );
lapack_int LAPACKE_ctgsen( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* alpha,
lapack_complex_float* beta, lapack_complex_float* q,
lapack_int ldq, lapack_complex_float* z,
lapack_int ldz, lapack_int* m, float* pl, float* pr,
float* dif );
lapack_int LAPACKE_ztgsen( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz,
lapack_int* m, double* pl, double* pr, double* dif );
lapack_int LAPACKE_stgsja( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_int k, lapack_int l, float* a, lapack_int lda,
float* b, lapack_int ldb, float tola, float tolb,
float* alpha, float* beta, float* u, lapack_int ldu,
float* v, lapack_int ldv, float* q, lapack_int ldq,
lapack_int* ncycle );
lapack_int LAPACKE_dtgsja( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_int k, lapack_int l, double* a,
lapack_int lda, double* b, lapack_int ldb,
double tola, double tolb, double* alpha,
double* beta, double* u, lapack_int ldu, double* v,
lapack_int ldv, double* q, lapack_int ldq,
lapack_int* ncycle );
lapack_int LAPACKE_ctgsja( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_int k, lapack_int l, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float tola, float tolb, float* alpha,
float* beta, lapack_complex_float* u, lapack_int ldu,
lapack_complex_float* v, lapack_int ldv,
lapack_complex_float* q, lapack_int ldq,
lapack_int* ncycle );
lapack_int LAPACKE_ztgsja( int matrix_order, char jobu, char jobv, char jobq,
lapack_int m, lapack_int p, lapack_int n,
lapack_int k, lapack_int l, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double tola, double tolb,
double* alpha, double* beta,
lapack_complex_double* u, lapack_int ldu,
lapack_complex_double* v, lapack_int ldv,
lapack_complex_double* q, lapack_int ldq,
lapack_int* ncycle );
lapack_int LAPACKE_stgsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const float* a, lapack_int lda, const float* b,
lapack_int ldb, const float* vl, lapack_int ldvl,
const float* vr, lapack_int ldvr, float* s,
float* dif, lapack_int mm, lapack_int* m );
lapack_int LAPACKE_dtgsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const double* a, lapack_int lda, const double* b,
lapack_int ldb, const double* vl, lapack_int ldvl,
const double* vr, lapack_int ldvr, double* s,
double* dif, lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ctgsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* vl, lapack_int ldvl,
const lapack_complex_float* vr, lapack_int ldvr,
float* s, float* dif, lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ztgsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* vl, lapack_int ldvl,
const lapack_complex_double* vr, lapack_int ldvr,
double* s, double* dif, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_stgsyl( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n, const float* a,
lapack_int lda, const float* b, lapack_int ldb,
float* c, lapack_int ldc, const float* d,
lapack_int ldd, const float* e, lapack_int lde,
float* f, lapack_int ldf, float* scale, float* dif );
lapack_int LAPACKE_dtgsyl( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n, const double* a,
lapack_int lda, const double* b, lapack_int ldb,
double* c, lapack_int ldc, const double* d,
lapack_int ldd, const double* e, lapack_int lde,
double* f, lapack_int ldf, double* scale,
double* dif );
lapack_int LAPACKE_ctgsyl( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* c, lapack_int ldc,
const lapack_complex_float* d, lapack_int ldd,
const lapack_complex_float* e, lapack_int lde,
lapack_complex_float* f, lapack_int ldf,
float* scale, float* dif );
lapack_int LAPACKE_ztgsyl( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* c, lapack_int ldc,
const lapack_complex_double* d, lapack_int ldd,
const lapack_complex_double* e, lapack_int lde,
lapack_complex_double* f, lapack_int ldf,
double* scale, double* dif );
lapack_int LAPACKE_stpcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const float* ap, float* rcond );
lapack_int LAPACKE_dtpcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const double* ap, double* rcond );
lapack_int LAPACKE_ctpcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const lapack_complex_float* ap,
float* rcond );
lapack_int LAPACKE_ztpcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const lapack_complex_double* ap,
double* rcond );
lapack_int LAPACKE_stprfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const float* ap,
const float* b, lapack_int ldb, const float* x,
lapack_int ldx, float* ferr, float* berr );
lapack_int LAPACKE_dtprfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const double* ap,
const double* b, lapack_int ldb, const double* x,
lapack_int ldx, double* ferr, double* berr );
lapack_int LAPACKE_ctprfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_ztprfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_stptri( int matrix_order, char uplo, char diag, lapack_int n,
float* ap );
lapack_int LAPACKE_dtptri( int matrix_order, char uplo, char diag, lapack_int n,
double* ap );
lapack_int LAPACKE_ctptri( int matrix_order, char uplo, char diag, lapack_int n,
lapack_complex_float* ap );
lapack_int LAPACKE_ztptri( int matrix_order, char uplo, char diag, lapack_int n,
lapack_complex_double* ap );
lapack_int LAPACKE_stptrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const float* ap,
float* b, lapack_int ldb );
lapack_int LAPACKE_dtptrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const double* ap,
double* b, lapack_int ldb );
lapack_int LAPACKE_ctptrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztptrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
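/*
 * Usage sketch (illustrative only): solving L*x = b with a lower triangular
 * matrix in packed storage via tptrs. Unlike the full-storage routines,
 * there is no leading dimension for the matrix: `ap` simply holds the
 * n*(n+1)/2 packed entries. n, ap and b are caller-supplied placeholders.
 *
 *   lapack_int info = LAPACKE_dtptrs( LAPACK_COL_MAJOR, 'L', 'N', 'N',
 *                                     n, 1, ap, b, n );
 */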
lapack_int LAPACKE_stpttf( int matrix_order, char transr, char uplo,
lapack_int n, const float* ap, float* arf );
lapack_int LAPACKE_dtpttf( int matrix_order, char transr, char uplo,
lapack_int n, const double* ap, double* arf );
lapack_int LAPACKE_ctpttf( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* ap,
lapack_complex_float* arf );
lapack_int LAPACKE_ztpttf( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* ap,
lapack_complex_double* arf );
lapack_int LAPACKE_stpttr( int matrix_order, char uplo, lapack_int n,
const float* ap, float* a, lapack_int lda );
lapack_int LAPACKE_dtpttr( int matrix_order, char uplo, lapack_int n,
const double* ap, double* a, lapack_int lda );
lapack_int LAPACKE_ctpttr( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_ztpttr( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_strcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const float* a, lapack_int lda,
float* rcond );
lapack_int LAPACKE_dtrcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const double* a, lapack_int lda,
double* rcond );
lapack_int LAPACKE_ctrcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, float* rcond );
lapack_int LAPACKE_ztrcon( int matrix_order, char norm, char uplo, char diag,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, double* rcond );
lapack_int LAPACKE_strevc( int matrix_order, char side, char howmny,
lapack_logical* select, lapack_int n, const float* t,
lapack_int ldt, float* vl, lapack_int ldvl,
float* vr, lapack_int ldvr, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_dtrevc( int matrix_order, char side, char howmny,
lapack_logical* select, lapack_int n,
const double* t, lapack_int ldt, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ctrevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ztrevc( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m );
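/*
 * Note: strevc/dtrevc take `select` as non-const because, when howmny = 'S',
 * selecting one eigenvector of a complex conjugate pair causes both to be
 * computed and `select` to be updated accordingly. ctrevc/ztrevc instead
 * take a non-const `t`: the triangular factor is modified internally and
 * restored on exit.
 */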
lapack_int LAPACKE_strexc( int matrix_order, char compq, lapack_int n, float* t,
lapack_int ldt, float* q, lapack_int ldq,
lapack_int* ifst, lapack_int* ilst );
lapack_int LAPACKE_dtrexc( int matrix_order, char compq, lapack_int n,
double* t, lapack_int ldt, double* q, lapack_int ldq,
lapack_int* ifst, lapack_int* ilst );
lapack_int LAPACKE_ctrexc( int matrix_order, char compq, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* q, lapack_int ldq,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_ztrexc( int matrix_order, char compq, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* q, lapack_int ldq,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_strrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* b, lapack_int ldb,
const float* x, lapack_int ldx, float* ferr,
float* berr );
lapack_int LAPACKE_dtrrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* b, lapack_int ldb,
const double* x, lapack_int ldx, double* ferr,
double* berr );
lapack_int LAPACKE_ctrrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr );
lapack_int LAPACKE_ztrrfs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr );
lapack_int LAPACKE_strsen( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n, float* t,
lapack_int ldt, float* q, lapack_int ldq, float* wr,
float* wi, lapack_int* m, float* s, float* sep );
lapack_int LAPACKE_dtrsen( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
double* t, lapack_int ldt, double* q, lapack_int ldq,
double* wr, double* wi, lapack_int* m, double* s,
double* sep );
lapack_int LAPACKE_ctrsen( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* w, lapack_int* m, float* s,
float* sep );
lapack_int LAPACKE_ztrsen( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* w, lapack_int* m, double* s,
double* sep );
lapack_int LAPACKE_strsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const float* t, lapack_int ldt, const float* vl,
lapack_int ldvl, const float* vr, lapack_int ldvr,
float* s, float* sep, lapack_int mm, lapack_int* m );
lapack_int LAPACKE_dtrsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const double* t, lapack_int ldt, const double* vl,
lapack_int ldvl, const double* vr, lapack_int ldvr,
double* s, double* sep, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_ctrsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* t, lapack_int ldt,
const lapack_complex_float* vl, lapack_int ldvl,
const lapack_complex_float* vr, lapack_int ldvr,
float* s, float* sep, lapack_int mm, lapack_int* m );
lapack_int LAPACKE_ztrsna( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* t, lapack_int ldt,
const lapack_complex_double* vl, lapack_int ldvl,
const lapack_complex_double* vr, lapack_int ldvr,
double* s, double* sep, lapack_int mm,
lapack_int* m );
lapack_int LAPACKE_strsyl( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const float* a, lapack_int lda, const float* b,
lapack_int ldb, float* c, lapack_int ldc,
float* scale );
lapack_int LAPACKE_dtrsyl( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const double* a, lapack_int lda, const double* b,
lapack_int ldb, double* c, lapack_int ldc,
double* scale );
lapack_int LAPACKE_ctrsyl( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* c, lapack_int ldc,
float* scale );
lapack_int LAPACKE_ztrsyl( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* c, lapack_int ldc,
double* scale );
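/*
 * Usage sketch (illustrative only): trsyl solves the Sylvester equation
 * op(A)*X + isgn*X*op(B) = scale*C for X, with A (m x m) and B (n x n)
 * already reduced to (quasi-)triangular Schur form. X overwrites C, and the
 * output `scale` in (0, 1] is chosen to avoid overflow. m, n, a, b and c
 * are caller-supplied placeholders:
 *
 *   double scale;
 *   lapack_int info = LAPACKE_dtrsyl( LAPACK_COL_MAJOR, 'N', 'N', 1,
 *                                     m, n, a, m, b, n, c, m, &scale );
 */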
lapack_int LAPACKE_strtri( int matrix_order, char uplo, char diag, lapack_int n,
float* a, lapack_int lda );
lapack_int LAPACKE_dtrtri( int matrix_order, char uplo, char diag, lapack_int n,
double* a, lapack_int lda );
lapack_int LAPACKE_ctrtri( int matrix_order, char uplo, char diag, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_ztrtri( int matrix_order, char uplo, char diag, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_strtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, float* b, lapack_int ldb );
lapack_int LAPACKE_dtrtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, double* b, lapack_int ldb );
lapack_int LAPACKE_ctrtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztrtrs( int matrix_order, char uplo, char trans, char diag,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_strttf( int matrix_order, char transr, char uplo,
lapack_int n, const float* a, lapack_int lda,
float* arf );
lapack_int LAPACKE_dtrttf( int matrix_order, char transr, char uplo,
lapack_int n, const double* a, lapack_int lda,
double* arf );
lapack_int LAPACKE_ctrttf( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* arf );
lapack_int LAPACKE_ztrttf( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* arf );
lapack_int LAPACKE_strttp( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float* ap );
lapack_int LAPACKE_dtrttp( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double* ap );
lapack_int LAPACKE_ctrttp( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* ap );
lapack_int LAPACKE_ztrttp( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* ap );
lapack_int LAPACKE_stzrzf( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau );
lapack_int LAPACKE_dtzrzf( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau );
lapack_int LAPACKE_ctzrzf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau );
lapack_int LAPACKE_ztzrzf( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau );
lapack_int LAPACKE_cungbr( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zungbr( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
lapack_int LAPACKE_cunghr( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zunghr( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
lapack_int LAPACKE_cunglq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zunglq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
lapack_int LAPACKE_cungql( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zungql( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
lapack_int LAPACKE_cungqr( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zungqr( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
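/*
 * Usage sketch (illustrative only): ungqr materializes the unitary factor Q
 * of a QR factorization computed by geqrf (declared earlier in this header).
 * With an m x n matrix (m >= n) factored in place and the n Householder
 * scalars in `tau`, the leading n columns of Q overwrite `a`:
 *
 *   lapack_int info = LAPACKE_zungqr( LAPACK_COL_MAJOR, m, n, n, a, m, tau );
 */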
lapack_int LAPACKE_cungrq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau );
lapack_int LAPACKE_zungrq( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau );
lapack_int LAPACKE_cungtr( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau );
lapack_int LAPACKE_zungtr( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau );
lapack_int LAPACKE_cunmbr( int matrix_order, char vect, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmbr( int matrix_order, char vect, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmhr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmhr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmlq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmlq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmql( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmql( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmqr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmqr( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmrq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmrq( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmrz( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmrz( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cunmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zunmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
lapack_int LAPACKE_cupgtr( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_complex_float* tau,
lapack_complex_float* q, lapack_int ldq );
lapack_int LAPACKE_zupgtr( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_complex_double* tau,
lapack_complex_double* q, lapack_int ldq );
lapack_int LAPACKE_cupmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n,
const lapack_complex_float* ap,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc );
lapack_int LAPACKE_zupmtr( int matrix_order, char side, char uplo, char trans,
lapack_int m, lapack_int n,
const lapack_complex_double* ap,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc );
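/*
 * The declarations below are the middle-level "_work" variants of the
 * routines above. They differ from the high-level wrappers in that the
 * caller supplies every work/rwork/iwork buffer explicitly instead of
 * having it allocated internally. For routines taking an `lwork` argument,
 * the usual LAPACK workspace query applies: call once with lwork = -1 and
 * the optimal size is returned in work[0].
 */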
lapack_int LAPACKE_sbdsdc_work( int matrix_order, char uplo, char compq,
lapack_int n, float* d, float* e, float* u,
lapack_int ldu, float* vt, lapack_int ldvt,
float* q, lapack_int* iq, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dbdsdc_work( int matrix_order, char uplo, char compq,
lapack_int n, double* d, double* e, double* u,
lapack_int ldu, double* vt, lapack_int ldvt,
double* q, lapack_int* iq, double* work,
lapack_int* iwork );
lapack_int LAPACKE_sbdsqr_work( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
float* d, float* e, float* vt, lapack_int ldvt,
float* u, lapack_int ldu, float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_dbdsqr_work( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
double* d, double* e, double* vt,
lapack_int ldvt, double* u, lapack_int ldu,
double* c, lapack_int ldc, double* work );
lapack_int LAPACKE_cbdsqr_work( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
float* d, float* e, lapack_complex_float* vt,
lapack_int ldvt, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_zbdsqr_work( int matrix_order, char uplo, lapack_int n,
lapack_int ncvt, lapack_int nru, lapack_int ncc,
double* d, double* e, lapack_complex_double* vt,
lapack_int ldvt, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* c,
lapack_int ldc, double* work );
lapack_int LAPACKE_sdisna_work( char job, lapack_int m, lapack_int n,
const float* d, float* sep );
lapack_int LAPACKE_ddisna_work( char job, lapack_int m, lapack_int n,
const double* d, double* sep );
lapack_int LAPACKE_sgbbrd_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, float* ab, lapack_int ldab,
float* d, float* e, float* q, lapack_int ldq,
float* pt, lapack_int ldpt, float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_dgbbrd_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, double* ab, lapack_int ldab,
double* d, double* e, double* q, lapack_int ldq,
double* pt, lapack_int ldpt, double* c,
lapack_int ldc, double* work );
lapack_int LAPACKE_cgbbrd_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, lapack_complex_float* ab,
lapack_int ldab, float* d, float* e,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* pt, lapack_int ldpt,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgbbrd_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int ncc, lapack_int kl,
lapack_int ku, lapack_complex_double* ab,
lapack_int ldab, double* d, double* e,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* pt, lapack_int ldpt,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgbcon_work( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, const lapack_int* ipiv,
float anorm, float* rcond, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgbcon_work( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, const lapack_int* ipiv,
double anorm, double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgbcon_work( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgbcon_work( int matrix_order, char norm, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab,
lapack_int ldab, const lapack_int* ipiv,
double anorm, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgbequ_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, float* r, float* c,
float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgbequ_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequ_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab, lapack_int ldab,
float* r, float* c, float* rowcnd,
float* colcnd, float* amax );
lapack_int LAPACKE_zgbequ_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_sgbequb_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* ab,
lapack_int ldab, float* r, float* c,
float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_dgbequb_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_cgbequb_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_float* ab,
lapack_int ldab, float* r, float* c,
float* rowcnd, float* colcnd, float* amax );
lapack_int LAPACKE_zgbequb_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
const lapack_complex_double* ab,
lapack_int ldab, double* r, double* c,
double* rowcnd, double* colcnd, double* amax );
lapack_int LAPACKE_sgbrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const float* ab, lapack_int ldab,
const float* afb, lapack_int ldafb,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgbrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const double* ab, lapack_int ldab,
const double* afb, lapack_int ldafb,
const lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgbrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* afb,
lapack_int ldafb, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgbrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab,
const lapack_complex_double* afb,
lapack_int ldafb, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgbrfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const float* ab,
lapack_int ldab, const float* afb,
lapack_int ldafb, const lapack_int* ipiv,
const float* r, const float* c, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgbrfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, const double* ab,
lapack_int ldab, const double* afb,
lapack_int ldafb, const lapack_int* ipiv,
const double* r, const double* c,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgbrfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs,
const lapack_complex_float* ab,
lapack_int ldab,
const lapack_complex_float* afb,
lapack_int ldafb, const lapack_int* ipiv,
const float* r, const float* c,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgbrfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab,
const lapack_complex_double* afb,
lapack_int ldafb, const lapack_int* ipiv,
const double* r, const double* c,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs, float* ab,
lapack_int ldab, lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs, double* ab,
lapack_int ldab, lapack_int* ipiv, double* b,
lapack_int ldb );
lapack_int LAPACKE_cgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgbsv_work( int matrix_order, lapack_int n, lapack_int kl,
lapack_int ku, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
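/*
 * Usage sketch (illustrative only): the band LU driver gbsv needs kl extra
 * rows in the band storage for fill-in produced by partial pivoting, so
 * ldab must be at least 2*kl + ku + 1 even though the matrix itself
 * occupies only kl + ku + 1 diagonals (column-major layout assumed; n, kl,
 * ku, ab, ipiv and b are caller-supplied placeholders):
 *
 *   lapack_int ldab = 2*kl + ku + 1;
 *   lapack_int info = LAPACKE_dgbsv_work( LAPACK_COL_MAJOR, n, kl, ku, 1,
 *                                         ab, ldab, ipiv, b, n );
 */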
lapack_int LAPACKE_sgbsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, float* ab, lapack_int ldab,
float* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, float* r, float* c, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dgbsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, double* ab, lapack_int ldab,
double* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, double* r, double* c, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cgbsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
float* r, float* c, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgbsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* afb,
lapack_int ldafb, lapack_int* ipiv, char* equed,
double* r, double* c, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sgbsvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, float* ab, lapack_int ldab,
float* afb, lapack_int ldafb, lapack_int* ipiv,
char* equed, float* r, float* c, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgbsvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, double* ab, lapack_int ldab,
double* afb, lapack_int ldafb,
lapack_int* ipiv, char* equed, double* r,
double* c, double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgbsvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* afb,
lapack_int ldafb, lapack_int* ipiv,
char* equed, float* r, float* c,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgbsvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int kl, lapack_int ku,
lapack_int nrhs, lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* afb,
lapack_int ldafb, lapack_int* ipiv,
char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, float* ab,
lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_dgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, double* ab,
lapack_int ldab, lapack_int* ipiv );
lapack_int LAPACKE_cgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
lapack_complex_float* ab, lapack_int ldab,
lapack_int* ipiv );
lapack_int LAPACKE_zgbtrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku,
lapack_complex_double* ab, lapack_int ldab,
lapack_int* ipiv );
lapack_int LAPACKE_sgbtrs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const float* ab, lapack_int ldab,
const lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgbtrs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const double* ab, lapack_int ldab,
const lapack_int* ipiv, double* b,
lapack_int ldb );
lapack_int LAPACKE_cgbtrs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgbtrs_work( int matrix_order, char trans, lapack_int n,
lapack_int kl, lapack_int ku, lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_sgebak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const float* scale, lapack_int m, float* v,
lapack_int ldv );
lapack_int LAPACKE_dgebak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const double* scale, lapack_int m, double* v,
lapack_int ldv );
lapack_int LAPACKE_cgebak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const float* scale, lapack_int m,
lapack_complex_float* v, lapack_int ldv );
lapack_int LAPACKE_zgebak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const double* scale, lapack_int m,
lapack_complex_double* v, lapack_int ldv );
lapack_int LAPACKE_sgebal_work( int matrix_order, char job, lapack_int n,
float* a, lapack_int lda, lapack_int* ilo,
lapack_int* ihi, float* scale );
lapack_int LAPACKE_dgebal_work( int matrix_order, char job, lapack_int n,
double* a, lapack_int lda, lapack_int* ilo,
lapack_int* ihi, double* scale );
lapack_int LAPACKE_cgebal_work( int matrix_order, char job, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ilo, lapack_int* ihi,
float* scale );
lapack_int LAPACKE_zgebal_work( int matrix_order, char job, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ilo, lapack_int* ihi,
double* scale );
lapack_int LAPACKE_sgebrd_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* d, float* e,
float* tauq, float* taup, float* work,
lapack_int lwork );
lapack_int LAPACKE_dgebrd_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* d, double* e,
double* tauq, double* taup, double* work,
lapack_int lwork );
lapack_int LAPACKE_cgebrd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
float* d, float* e, lapack_complex_float* tauq,
lapack_complex_float* taup,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgebrd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double* d, double* e,
lapack_complex_double* tauq,
lapack_complex_double* taup,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgecon_work( int matrix_order, char norm, lapack_int n,
const float* a, lapack_int lda, float anorm,
float* rcond, float* work, lapack_int* iwork );
lapack_int LAPACKE_dgecon_work( int matrix_order, char norm, lapack_int n,
const double* a, lapack_int lda, double anorm,
double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgecon_work( int matrix_order, char norm, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float anorm, float* rcond,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgecon_work( int matrix_order, char norm, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double anorm, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgeequ_work( int matrix_order, lapack_int m, lapack_int n,
const float* a, lapack_int lda, float* r,
float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_dgeequ_work( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, double* r,
double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_cgeequ_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* r, float* c, float* rowcnd,
float* colcnd, float* amax );
lapack_int LAPACKE_zgeequ_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* r, double* c, double* rowcnd,
double* colcnd, double* amax );
lapack_int LAPACKE_sgeequb_work( int matrix_order, lapack_int m, lapack_int n,
const float* a, lapack_int lda, float* r,
float* c, float* rowcnd, float* colcnd,
float* amax );
lapack_int LAPACKE_dgeequb_work( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, double* r,
double* c, double* rowcnd, double* colcnd,
double* amax );
lapack_int LAPACKE_cgeequb_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* r, float* c, float* rowcnd,
float* colcnd, float* amax );
lapack_int LAPACKE_zgeequb_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* r, double* c, double* rowcnd,
double* colcnd, double* amax );
lapack_int LAPACKE_sgees_work( int matrix_order, char jobvs, char sort,
LAPACK_S_SELECT2 select, lapack_int n, float* a,
lapack_int lda, lapack_int* sdim, float* wr,
float* wi, float* vs, lapack_int ldvs,
float* work, lapack_int lwork,
lapack_logical* bwork );
lapack_int LAPACKE_dgees_work( int matrix_order, char jobvs, char sort,
LAPACK_D_SELECT2 select, lapack_int n, double* a,
lapack_int lda, lapack_int* sdim, double* wr,
double* wi, double* vs, lapack_int ldvs,
double* work, lapack_int lwork,
lapack_logical* bwork );
lapack_int LAPACKE_cgees_work( int matrix_order, char jobvs, char sort,
LAPACK_C_SELECT1 select, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* sdim, lapack_complex_float* w,
lapack_complex_float* vs, lapack_int ldvs,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_logical* bwork );
lapack_int LAPACKE_zgees_work( int matrix_order, char jobvs, char sort,
LAPACK_Z_SELECT1 select, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* sdim, lapack_complex_double* w,
lapack_complex_double* vs, lapack_int ldvs,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_logical* bwork );
lapack_int LAPACKE_sgeesx_work( int matrix_order, char jobvs, char sort,
LAPACK_S_SELECT2 select, char sense,
lapack_int n, float* a, lapack_int lda,
lapack_int* sdim, float* wr, float* wi,
float* vs, lapack_int ldvs, float* rconde,
float* rcondv, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork,
lapack_logical* bwork );
lapack_int LAPACKE_dgeesx_work( int matrix_order, char jobvs, char sort,
LAPACK_D_SELECT2 select, char sense,
lapack_int n, double* a, lapack_int lda,
lapack_int* sdim, double* wr, double* wi,
double* vs, lapack_int ldvs, double* rconde,
double* rcondv, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork,
lapack_logical* bwork );
lapack_int LAPACKE_cgeesx_work( int matrix_order, char jobvs, char sort,
LAPACK_C_SELECT1 select, char sense,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_int* sdim,
lapack_complex_float* w,
lapack_complex_float* vs, lapack_int ldvs,
float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_logical* bwork );
lapack_int LAPACKE_zgeesx_work( int matrix_order, char jobvs, char sort,
LAPACK_Z_SELECT1 select, char sense,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_int* sdim,
lapack_complex_double* w,
lapack_complex_double* vs, lapack_int ldvs,
double* rconde, double* rcondv,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_logical* bwork );
lapack_int LAPACKE_sgeev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, float* a, lapack_int lda,
float* wr, float* wi, float* vl, lapack_int ldvl,
float* vr, lapack_int ldvr, float* work,
lapack_int lwork );
lapack_int LAPACKE_dgeev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, double* a, lapack_int lda,
double* wr, double* wi, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgeev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* w,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zgeev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* w,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_sgeevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, float* a,
lapack_int lda, float* wr, float* wi, float* vl,
lapack_int ldvl, float* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, float* scale,
float* abnrm, float* rconde, float* rcondv,
float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dgeevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, double* a,
lapack_int lda, double* wr, double* wi,
double* vl, lapack_int ldvl, double* vr,
lapack_int ldvr, lapack_int* ilo,
lapack_int* ihi, double* scale, double* abnrm,
double* rconde, double* rcondv, double* work,
lapack_int lwork, lapack_int* iwork );
lapack_int LAPACKE_cgeevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* w,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, float* scale,
float* abnrm, float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zgeevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* w,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, double* scale,
double* abnrm, double* rconde, double* rcondv,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_sgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, float* a, lapack_int lda,
float* tau, float* work, lapack_int lwork );
lapack_int LAPACKE_dgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, double* a, lapack_int lda,
double* tau, double* work, lapack_int lwork );
lapack_int LAPACKE_cgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgejsv_work( int matrix_order, char joba, char jobu,
char jobv, char jobr, char jobt, char jobp,
lapack_int m, lapack_int n, float* a,
lapack_int lda, float* sva, float* u,
lapack_int ldu, float* v, lapack_int ldv,
float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dgejsv_work( int matrix_order, char joba, char jobu,
char jobv, char jobr, char jobt, char jobp,
lapack_int m, lapack_int n, double* a,
lapack_int lda, double* sva, double* u,
lapack_int ldu, double* v, lapack_int ldv,
double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_sgelq2_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work );
lapack_int LAPACKE_dgelq2_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work );
lapack_int LAPACKE_cgelq2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work );
lapack_int LAPACKE_zgelq2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work );
lapack_int LAPACKE_sgelqf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgelqf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgelqf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgelqf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgels_work( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgels_work( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgels_work( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgels_work( int matrix_order, char trans, lapack_int m,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work, lapack_int lwork );
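/*
 * Usage sketch (illustrative only): the two-call workspace-query idiom with
 * a "_work" routine, here a least-squares solve via dgels. A first call
 * with lwork = -1 only reports the optimal workspace size; m, n, a and b
 * are caller-supplied placeholders and m >= n is assumed:
 *
 *   double wkopt;
 *   LAPACKE_dgels_work( LAPACK_COL_MAJOR, 'N', m, n, 1,
 *                       a, m, b, m, &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   LAPACKE_dgels_work( LAPACK_COL_MAJOR, 'N', m, n, 1,
 *                       a, m, b, m, work, lwork );
 *   // solution in the first n rows of b; free(work) when done
 */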
lapack_int LAPACKE_sgelsd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
float* b, lapack_int ldb, float* s, float rcond,
lapack_int* rank, float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dgelsd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* s,
double rcond, lapack_int* rank, double* work,
lapack_int lwork, lapack_int* iwork );
lapack_int LAPACKE_cgelsd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int* iwork );
lapack_int LAPACKE_zgelsd_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* s, double rcond,
lapack_int* rank, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int* iwork );
lapack_int LAPACKE_sgelss_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
float* b, lapack_int ldb, float* s, float rcond,
lapack_int* rank, float* work,
lapack_int lwork );
lapack_int LAPACKE_dgelss_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* s,
double rcond, lapack_int* rank, double* work,
lapack_int lwork );
lapack_int LAPACKE_cgelss_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* s, float rcond,
lapack_int* rank, lapack_complex_float* work,
lapack_int lwork, float* rwork );
lapack_int LAPACKE_zgelss_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double* s, double rcond,
lapack_int* rank, lapack_complex_double* work,
lapack_int lwork, double* rwork );
lapack_int LAPACKE_sgelsy_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
float* b, lapack_int ldb, lapack_int* jpvt,
float rcond, lapack_int* rank, float* work,
lapack_int lwork );
lapack_int LAPACKE_dgelsy_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, lapack_int* jpvt,
double rcond, lapack_int* rank, double* work,
lapack_int lwork );
lapack_int LAPACKE_cgelsy_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_int* jpvt, float rcond,
lapack_int* rank, lapack_complex_float* work,
lapack_int lwork, float* rwork );
lapack_int LAPACKE_zgelsy_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_int* jpvt, double rcond,
lapack_int* rank, lapack_complex_double* work,
lapack_int lwork, double* rwork );
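/* Illustrative note: gelsd (divide-and-conquer SVD), gelss (simple SVD) and
 * gelsy (complete orthogonal factorization) all handle rank-deficient least
 * squares; rcond sets the effective-rank cutoff and the rank found is
 * returned. A LAPACKE_dgelsy_work sketch, column-major, assuming m >= n and
 * caller-defined a, b (the 1e-10 tolerance is only for illustration):
 *
 *   lapack_int rank;
 *   lapack_int* jpvt = (lapack_int*)calloc( n, sizeof(lapack_int) ); // 0 = free column
 *   double wkopt;
 *   lapack_int info = LAPACKE_dgelsy_work( LAPACK_COL_MAJOR, m, n, 1, a, m,
 *                                          b, m, jpvt, 1e-10, &rank,
 *                                          &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   info = LAPACKE_dgelsy_work( LAPACK_COL_MAJOR, m, n, 1, a, m, b, m,
 *                               jpvt, 1e-10, &rank, work, lwork );
 */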
lapack_int LAPACKE_sgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgeqlf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* jpvt,
float* tau, float* work, lapack_int lwork );
lapack_int LAPACKE_dgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* jpvt,
double* tau, double* work, lapack_int lwork );
lapack_int LAPACKE_cgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zgeqp3_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_sgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* jpvt,
float* tau, float* work );
lapack_int LAPACKE_dgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* jpvt,
double* tau, double* work );
lapack_int LAPACKE_cgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_float* tau,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgeqpf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* jpvt, lapack_complex_double* tau,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work );
lapack_int LAPACKE_dgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work );
lapack_int LAPACKE_cgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work );
lapack_int LAPACKE_zgeqr2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work );
lapack_int LAPACKE_sgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgeqrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
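/* Illustrative sketch: QR factorization with LAPACKE_dgeqrf_work. On exit R
 * occupies the upper triangle of a and the Householder reflectors (scalar
 * factors in tau, length min(m,n)) encode Q implicitly; combine with
 * orgqr/ormqr to form or apply Q. Column-major, caller-defined m, n, a:
 *
 *   double* tau = (double*)malloc( (m < n ? m : n) * sizeof(double) );
 *   double wkopt;
 *   lapack_int info = LAPACKE_dgeqrf_work( LAPACK_COL_MAJOR, m, n, a, m,
 *                                          tau, &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   info = LAPACKE_dgeqrf_work( LAPACK_COL_MAJOR, m, n, a, m,
 *                               tau, work, lwork );
 */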
lapack_int LAPACKE_sgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work,
lapack_int lwork );
lapack_int LAPACKE_sgerfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgerfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cgerfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgerfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgerfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af,
lapack_int ldaf, const lapack_int* ipiv,
const float* r, const float* c, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgerfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* r, const double* c,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgerfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const float* r, const float* c,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgerfsx_work( int matrix_order, char trans, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* r, const double* c,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sgerqf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgerqf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgerqf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgerqf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgesdd_work( int matrix_order, char jobz, lapack_int m,
lapack_int n, float* a, lapack_int lda,
float* s, float* u, lapack_int ldu, float* vt,
lapack_int ldvt, float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dgesdd_work( int matrix_order, char jobz, lapack_int m,
lapack_int n, double* a, lapack_int lda,
double* s, double* u, lapack_int ldu,
double* vt, lapack_int ldvt, double* work,
lapack_int lwork, lapack_int* iwork );
lapack_int LAPACKE_cgesdd_work( int matrix_order, char jobz, lapack_int m,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float* s,
lapack_complex_float* u, lapack_int ldu,
lapack_complex_float* vt, lapack_int ldvt,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int* iwork );
lapack_int LAPACKE_zgesdd_work( int matrix_order, char jobz, lapack_int m,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double* s,
lapack_complex_double* u, lapack_int ldu,
lapack_complex_double* vt, lapack_int ldvt,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int* iwork );
lapack_int LAPACKE_sgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
float* a, lapack_int lda, lapack_int* ipiv,
float* b, lapack_int ldb );
lapack_int LAPACKE_dgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
double* a, lapack_int lda, lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_dsgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
double* a, lapack_int lda, lapack_int* ipiv,
double* b, lapack_int ldb, double* x,
lapack_int ldx, double* work, float* swork,
lapack_int* iter );
lapack_int LAPACKE_zcgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, lapack_complex_double* work,
lapack_complex_float* swork, double* rwork,
lapack_int* iter );
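/* Illustrative sketch: LAPACKE_dgesv_work solves A*X = B by LU with partial
 * pivoting and needs no scratch arrays. dsgesv/zcgesv above are the
 * mixed-precision variants: they factor in single precision and refine to
 * full precision, with iter reporting the refinement count (negative means
 * they fell back to an ordinary double-precision factorization).
 *
 *   double a[9] = { 4, 1, 2,  1, 3, 0,  2, 0, 5 };  // 3x3, column-major
 *   double b[3] = { 1, 2, 3 };                      // overwritten with x
 *   lapack_int ipiv[3];
 *   lapack_int info = LAPACKE_dgesv_work( LAPACK_COL_MAJOR, 3, 1,
 *                                         a, 3, ipiv, b, 3 );
 *   // info == 0 on success; info > 0 flags a zero pivot U(i,i)
 */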
lapack_int LAPACKE_sgesvd_work( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, float* a,
lapack_int lda, float* s, float* u,
lapack_int ldu, float* vt, lapack_int ldvt,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgesvd_work( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n, double* a,
lapack_int lda, double* s, double* u,
lapack_int ldu, double* vt, lapack_int ldvt,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgesvd_work( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
float* s, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* vt,
lapack_int ldvt, lapack_complex_float* work,
lapack_int lwork, float* rwork );
lapack_int LAPACKE_zgesvd_work( int matrix_order, char jobu, char jobvt,
lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double* s, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* vt,
lapack_int ldvt, lapack_complex_double* work,
lapack_int lwork, double* rwork );
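/* Illustrative sketch: thin SVD with LAPACKE_dgesvd_work
 * (jobu = jobvt = 'S' keeps the leading min(m,n) singular vectors). The
 * gesdd driver above uses divide and conquer -- typically faster on large
 * matrices at the price of more workspace. Column-major, assuming m >= n
 * and a caller-defined a:
 *
 *   double* s  = (double*)malloc( n * sizeof(double) );
 *   double* u  = (double*)malloc( m * n * sizeof(double) );
 *   double* vt = (double*)malloc( n * n * sizeof(double) );
 *   double wkopt;
 *   lapack_int info = LAPACKE_dgesvd_work( LAPACK_COL_MAJOR, 'S', 'S', m, n,
 *                                          a, m, s, u, m, vt, n, &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   info = LAPACKE_dgesvd_work( LAPACK_COL_MAJOR, 'S', 'S', m, n,
 *                               a, m, s, u, m, vt, n, work, lwork );
 */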
lapack_int LAPACKE_sgesvj_work( int matrix_order, char joba, char jobu,
char jobv, lapack_int m, lapack_int n, float* a,
lapack_int lda, float* sva, lapack_int mv,
float* v, lapack_int ldv, float* work,
lapack_int lwork );
lapack_int LAPACKE_dgesvj_work( int matrix_order, char joba, char jobu,
char jobv, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* sva,
lapack_int mv, double* v, lapack_int ldv,
double* work, lapack_int lwork );
lapack_int LAPACKE_sgesvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r,
float* c, float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, float* work, lapack_int* iwork );
lapack_int LAPACKE_dgesvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r,
double* c, double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, double* work, lapack_int* iwork );
lapack_int LAPACKE_cgesvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r,
float* c, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zgesvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r,
double* c, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sgesvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r,
float* c, float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgesvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r,
double* c, double* b, lapack_int ldb,
double* x, lapack_int ldx, double* rcond,
double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgesvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* r,
float* c, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgesvxx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* r,
double* c, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params,
lapack_complex_double* work, double* rwork );
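/* Illustrative note: the gesvx/gesvxx "expert" drivers add optional
 * equilibration (fact/equed with row and column scalings r, c), a condition
 * estimate (rcond) and error bounds (ferr/berr, or the err_bnds_* arrays
 * for *svxx) on top of the plain solver. Sketch with LAPACKE_dgesvx_work,
 * factoring internally (fact = 'N'); a, af, ipiv, r, c, b, x are
 * caller-defined, with work of size 4*n and iwork of size n assumed per the
 * dgesvx documentation:
 *
 *   char equed;
 *   double rcond, ferr[1], berr[1];
 *   lapack_int info = LAPACKE_dgesvx_work( LAPACK_COL_MAJOR, 'N', 'N', n, 1,
 *                                          a, n, af, n, ipiv, &equed, r, c,
 *                                          b, n, x, n, &rcond, ferr, berr,
 *                                          work, iwork );
 */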
lapack_int LAPACKE_sgetf2_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_dgetf2_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_cgetf2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zgetf2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_sgetrf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_dgetrf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, lapack_int* ipiv );
lapack_int LAPACKE_cgetrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_zgetrf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv );
lapack_int LAPACKE_sgetri_work( int matrix_order, lapack_int n, float* a,
lapack_int lda, const lapack_int* ipiv,
float* work, lapack_int lwork );
lapack_int LAPACKE_dgetri_work( int matrix_order, lapack_int n, double* a,
lapack_int lda, const lapack_int* ipiv,
double* work, lapack_int lwork );
lapack_int LAPACKE_cgetri_work( int matrix_order, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgetri_work( int matrix_order, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgetrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgetrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cgetrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zgetrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
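/* Illustrative sketch: the classic factor/solve split. getrf computes
 * P*L*U in place, getrs reuses the factors for any number of right-hand
 * sides, and getri forms the explicit inverse (usually unnecessary --
 * prefer getrs when a solve is all that is needed). Column-major,
 * caller-defined n, nrhs, a, b, ipiv:
 *
 *   lapack_int info = LAPACKE_dgetrf_work( LAPACK_COL_MAJOR, n, n,
 *                                          a, n, ipiv );
 *   if( info == 0 )
 *       info = LAPACKE_dgetrs_work( LAPACK_COL_MAJOR, 'N', n, nrhs,
 *                                   a, n, ipiv, b, n );
 */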
lapack_int LAPACKE_sggbak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const float* lscale, const float* rscale,
lapack_int m, float* v, lapack_int ldv );
lapack_int LAPACKE_dggbak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const double* lscale, const double* rscale,
lapack_int m, double* v, lapack_int ldv );
lapack_int LAPACKE_cggbak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const float* lscale, const float* rscale,
lapack_int m, lapack_complex_float* v,
lapack_int ldv );
lapack_int LAPACKE_zggbak_work( int matrix_order, char job, char side,
lapack_int n, lapack_int ilo, lapack_int ihi,
const double* lscale, const double* rscale,
lapack_int m, lapack_complex_double* v,
lapack_int ldv );
lapack_int LAPACKE_sggbal_work( int matrix_order, char job, lapack_int n,
float* a, lapack_int lda, float* b,
lapack_int ldb, lapack_int* ilo,
lapack_int* ihi, float* lscale, float* rscale,
float* work );
lapack_int LAPACKE_dggbal_work( int matrix_order, char job, lapack_int n,
double* a, lapack_int lda, double* b,
lapack_int ldb, lapack_int* ilo,
lapack_int* ihi, double* lscale, double* rscale,
double* work );
lapack_int LAPACKE_cggbal_work( int matrix_order, char job, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale, float* work );
lapack_int LAPACKE_zggbal_work( int matrix_order, char job, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_int* ilo, lapack_int* ihi,
double* lscale, double* rscale, double* work );
lapack_int LAPACKE_sgges_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_S_SELECT3 selctg, lapack_int n,
float* a, lapack_int lda, float* b,
lapack_int ldb, lapack_int* sdim, float* alphar,
float* alphai, float* beta, float* vsl,
lapack_int ldvsl, float* vsr, lapack_int ldvsr,
float* work, lapack_int lwork,
lapack_logical* bwork );
lapack_int LAPACKE_dgges_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_D_SELECT3 selctg, lapack_int n,
double* a, lapack_int lda, double* b,
lapack_int ldb, lapack_int* sdim, double* alphar,
double* alphai, double* beta, double* vsl,
lapack_int ldvsl, double* vsr, lapack_int ldvsr,
double* work, lapack_int lwork,
lapack_logical* bwork );
lapack_int LAPACKE_cgges_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_C_SELECT2 selctg, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_int* sdim, lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* vsl, lapack_int ldvsl,
lapack_complex_float* vsr, lapack_int ldvsr,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_logical* bwork );
lapack_int LAPACKE_zgges_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_Z_SELECT2 selctg, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_int* sdim, lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int ldvsl,
lapack_complex_double* vsr, lapack_int ldvsr,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_logical* bwork );
lapack_int LAPACKE_sggesx_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_S_SELECT3 selctg, char sense,
lapack_int n, float* a, lapack_int lda,
float* b, lapack_int ldb, lapack_int* sdim,
float* alphar, float* alphai, float* beta,
float* vsl, lapack_int ldvsl, float* vsr,
lapack_int ldvsr, float* rconde, float* rcondv,
float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork,
lapack_logical* bwork );
lapack_int LAPACKE_dggesx_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_D_SELECT3 selctg, char sense,
lapack_int n, double* a, lapack_int lda,
double* b, lapack_int ldb, lapack_int* sdim,
double* alphar, double* alphai, double* beta,
double* vsl, lapack_int ldvsl, double* vsr,
lapack_int ldvsr, double* rconde,
double* rcondv, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork,
lapack_logical* bwork );
lapack_int LAPACKE_cggesx_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_C_SELECT2 selctg, char sense,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_int* sdim,
lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* vsl, lapack_int ldvsl,
lapack_complex_float* vsr, lapack_int ldvsr,
float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int* iwork,
lapack_int liwork, lapack_logical* bwork );
lapack_int LAPACKE_zggesx_work( int matrix_order, char jobvsl, char jobvsr,
char sort, LAPACK_Z_SELECT2 selctg, char sense,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_int* sdim,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int ldvsl,
lapack_complex_double* vsr, lapack_int ldvsr,
double* rconde, double* rcondv,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int* iwork,
lapack_int liwork, lapack_logical* bwork );
lapack_int LAPACKE_sggev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, float* a, lapack_int lda, float* b,
lapack_int ldb, float* alphar, float* alphai,
float* beta, float* vl, lapack_int ldvl,
float* vr, lapack_int ldvr, float* work,
lapack_int lwork );
lapack_int LAPACKE_dggev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, double* a, lapack_int lda,
double* b, lapack_int ldb, double* alphar,
double* alphai, double* beta, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
double* work, lapack_int lwork );
lapack_int LAPACKE_cggev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zggev_work( int matrix_order, char jobvl, char jobvr,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
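/* Illustrative sketch: ggev solves the generalized problem
 * A*x = lambda*B*x; eigenvalue j is (alphar[j] + i*alphai[j]) / beta[j],
 * reported as a ratio so infinite eigenvalues (beta[j] == 0) stay
 * representable. Right eigenvectors only, column-major, caller-defined
 * n, a, b, alphar, alphai, beta, vr:
 *
 *   double vldummy;   // vl is not referenced when jobvl == 'N'
 *   double wkopt;
 *   lapack_int info = LAPACKE_dggev_work( LAPACK_COL_MAJOR, 'N', 'V', n,
 *                                         a, n, b, n, alphar, alphai, beta,
 *                                         &vldummy, 1, vr, n, &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   info = LAPACKE_dggev_work( LAPACK_COL_MAJOR, 'N', 'V', n, a, n, b, n,
 *                              alphar, alphai, beta, &vldummy, 1, vr, n,
 *                              work, lwork );
 */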
lapack_int LAPACKE_sggevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* alphar, float* alphai, float* beta,
float* vl, lapack_int ldvl, float* vr,
lapack_int ldvr, lapack_int* ilo,
lapack_int* ihi, float* lscale, float* rscale,
float* abnrm, float* bbnrm, float* rconde,
float* rcondv, float* work, lapack_int lwork,
lapack_int* iwork, lapack_logical* bwork );
lapack_int LAPACKE_dggevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* alphar, double* alphai, double* beta,
double* vl, lapack_int ldvl, double* vr,
lapack_int ldvr, lapack_int* ilo,
lapack_int* ihi, double* lscale, double* rscale,
double* abnrm, double* bbnrm, double* rconde,
double* rcondv, double* work, lapack_int lwork,
lapack_int* iwork, lapack_logical* bwork );
lapack_int LAPACKE_cggevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale, float* abnrm, float* bbnrm,
float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int* iwork,
lapack_logical* bwork );
lapack_int LAPACKE_zggevx_work( int matrix_order, char balanc, char jobvl,
char jobvr, char sense, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int* ilo, lapack_int* ihi,
double* lscale, double* rscale, double* abnrm,
double* bbnrm, double* rconde, double* rcondv,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int* iwork,
lapack_logical* bwork );
lapack_int LAPACKE_sggglm_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, float* a, lapack_int lda,
float* b, lapack_int ldb, float* d, float* x,
float* y, float* work, lapack_int lwork );
lapack_int LAPACKE_dggglm_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, double* a, lapack_int lda,
double* b, lapack_int ldb, double* d, double* x,
double* y, double* work, lapack_int lwork );
lapack_int LAPACKE_cggglm_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* d,
lapack_complex_float* x,
lapack_complex_float* y,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zggglm_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* d,
lapack_complex_double* x,
lapack_complex_double* y,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sgghrd_work( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
float* a, lapack_int lda, float* b,
lapack_int ldb, float* q, lapack_int ldq,
float* z, lapack_int ldz );
lapack_int LAPACKE_dgghrd_work( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
double* a, lapack_int lda, double* b,
lapack_int ldb, double* q, lapack_int ldq,
double* z, lapack_int ldz );
lapack_int LAPACKE_cgghrd_work( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz );
lapack_int LAPACKE_zgghrd_work( int matrix_order, char compq, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz );
lapack_int LAPACKE_sgglse_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, float* a, lapack_int lda,
float* b, lapack_int ldb, float* c, float* d,
float* x, float* work, lapack_int lwork );
lapack_int LAPACKE_dgglse_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, double* a, lapack_int lda,
double* b, lapack_int ldb, double* c, double* d,
double* x, double* work, lapack_int lwork );
lapack_int LAPACKE_cgglse_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* c,
lapack_complex_float* d,
lapack_complex_float* x,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zgglse_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* c,
lapack_complex_double* d,
lapack_complex_double* x,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sggqrf_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, float* a, lapack_int lda,
float* taua, float* b, lapack_int ldb,
float* taub, float* work, lapack_int lwork );
lapack_int LAPACKE_dggqrf_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, double* a, lapack_int lda,
double* taua, double* b, lapack_int ldb,
double* taub, double* work, lapack_int lwork );
lapack_int LAPACKE_cggqrf_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* taua,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* taub,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zggqrf_work( int matrix_order, lapack_int n, lapack_int m,
lapack_int p, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* taua,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* taub,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sggrqf_work( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, float* a, lapack_int lda,
float* taua, float* b, lapack_int ldb,
float* taub, float* work, lapack_int lwork );
lapack_int LAPACKE_dggrqf_work( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, double* a, lapack_int lda,
double* taua, double* b, lapack_int ldb,
double* taub, double* work, lapack_int lwork );
lapack_int LAPACKE_cggrqf_work( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* taua,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* taub,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zggrqf_work( int matrix_order, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* taua,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* taub,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_sggsvd_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int n,
lapack_int p, lapack_int* k, lapack_int* l,
float* a, lapack_int lda, float* b,
lapack_int ldb, float* alpha, float* beta,
float* u, lapack_int ldu, float* v,
lapack_int ldv, float* q, lapack_int ldq,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dggsvd_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int n,
lapack_int p, lapack_int* k, lapack_int* l,
double* a, lapack_int lda, double* b,
lapack_int ldb, double* alpha, double* beta,
double* u, lapack_int ldu, double* v,
lapack_int ldv, double* q, lapack_int ldq,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cggsvd_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int n,
lapack_int p, lapack_int* k, lapack_int* l,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
float* alpha, float* beta,
lapack_complex_float* u, lapack_int ldu,
lapack_complex_float* v, lapack_int ldv,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* work, float* rwork,
lapack_int* iwork );
lapack_int LAPACKE_zggsvd_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int n,
lapack_int p, lapack_int* k, lapack_int* l,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double* alpha, double* beta,
lapack_complex_double* u, lapack_int ldu,
lapack_complex_double* v, lapack_int ldv,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* work, double* rwork,
lapack_int* iwork );
lapack_int LAPACKE_sggsvp_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, float* a, lapack_int lda,
float* b, lapack_int ldb, float tola,
float tolb, lapack_int* k, lapack_int* l,
float* u, lapack_int ldu, float* v,
lapack_int ldv, float* q, lapack_int ldq,
lapack_int* iwork, float* tau, float* work );
lapack_int LAPACKE_dggsvp_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, double* a, lapack_int lda,
double* b, lapack_int ldb, double tola,
double tolb, lapack_int* k, lapack_int* l,
double* u, lapack_int ldu, double* v,
lapack_int ldv, double* q, lapack_int ldq,
lapack_int* iwork, double* tau, double* work );
lapack_int LAPACKE_cggsvp_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float tola, float tolb,
lapack_int* k, lapack_int* l,
lapack_complex_float* u, lapack_int ldu,
lapack_complex_float* v, lapack_int ldv,
lapack_complex_float* q, lapack_int ldq,
lapack_int* iwork, float* rwork,
lapack_complex_float* tau,
lapack_complex_float* work );
lapack_int LAPACKE_zggsvp_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, double tola, double tolb,
lapack_int* k, lapack_int* l,
lapack_complex_double* u, lapack_int ldu,
lapack_complex_double* v, lapack_int ldv,
lapack_complex_double* q, lapack_int ldq,
lapack_int* iwork, double* rwork,
lapack_complex_double* tau,
lapack_complex_double* work );
lapack_int LAPACKE_sgtcon_work( char norm, lapack_int n, const float* dl,
const float* d, const float* du,
const float* du2, const lapack_int* ipiv,
float anorm, float* rcond, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgtcon_work( char norm, lapack_int n, const double* dl,
const double* d, const double* du,
const double* du2, const lapack_int* ipiv,
double anorm, double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgtcon_work( char norm, lapack_int n,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zgtcon_work( char norm, lapack_int n,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2,
const lapack_int* ipiv, double anorm,
double* rcond, lapack_complex_double* work );
lapack_int LAPACKE_sgtrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* dl,
const float* d, const float* du,
const float* dlf, const float* df,
const float* duf, const float* du2,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dgtrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* dl,
const double* d, const double* du,
const double* dlf, const double* df,
const double* duf, const double* du2,
const lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cgtrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* dlf,
const lapack_complex_float* df,
const lapack_complex_float* duf,
const lapack_complex_float* du2,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgtrfs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* dlf,
const lapack_complex_double* df,
const lapack_complex_double* duf,
const lapack_complex_double* du2,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
float* dl, float* d, float* du, float* b,
lapack_int ldb );
lapack_int LAPACKE_dgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
double* dl, double* d, double* du, double* b,
lapack_int ldb );
lapack_int LAPACKE_cgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_float* dl,
lapack_complex_float* d,
lapack_complex_float* du,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
lapack_complex_double* dl,
lapack_complex_double* d,
lapack_complex_double* du,
lapack_complex_double* b, lapack_int ldb );
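/* Illustrative sketch: gtsv solves a tridiagonal system in O(n) time with
 * no workspace; the three diagonals (lengths n-1, n, n-1) and b are
 * overwritten:
 *
 *   double dl[3] = { -1, -1, -1 };
 *   double d[4]  = {  2,  2,  2,  2 };
 *   double du[3] = { -1, -1, -1 };
 *   double b[4]  = {  1,  0,  0,  1 };
 *   lapack_int info = LAPACKE_dgtsv_work( LAPACK_COL_MAJOR, 4, 1,
 *                                         dl, d, du, b, 4 );
 *   // b now holds the solution of this 1D Poisson-like system
 */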
lapack_int LAPACKE_sgtsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, const float* dl,
const float* d, const float* du, float* dlf,
float* df, float* duf, float* du2,
lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dgtsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs, const double* dl,
const double* d, const double* du, double* dlf,
double* df, double* duf, double* du2,
lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cgtsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
lapack_complex_float* dlf,
lapack_complex_float* df,
lapack_complex_float* duf,
lapack_complex_float* du2, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zgtsvx_work( int matrix_order, char fact, char trans,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
lapack_complex_double* dlf,
lapack_complex_double* df,
lapack_complex_double* duf,
lapack_complex_double* du2, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sgttrf_work( lapack_int n, float* dl, float* d, float* du,
float* du2, lapack_int* ipiv );
lapack_int LAPACKE_dgttrf_work( lapack_int n, double* dl, double* d, double* du,
double* du2, lapack_int* ipiv );
lapack_int LAPACKE_cgttrf_work( lapack_int n, lapack_complex_float* dl,
lapack_complex_float* d,
lapack_complex_float* du,
lapack_complex_float* du2, lapack_int* ipiv );
lapack_int LAPACKE_zgttrf_work( lapack_int n, lapack_complex_double* dl,
lapack_complex_double* d,
lapack_complex_double* du,
lapack_complex_double* du2, lapack_int* ipiv );
lapack_int LAPACKE_sgttrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const float* dl,
const float* d, const float* du,
const float* du2, const lapack_int* ipiv,
float* b, lapack_int ldb );
lapack_int LAPACKE_dgttrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const double* dl,
const double* d, const double* du,
const double* du2, const lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cgttrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs, const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zgttrs_work( int matrix_order, char trans, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2,
const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
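/* Illustrative note: to solve one tridiagonal matrix against many
 * right-hand sides, factor once with gttrf (which takes no matrix_order --
 * the diagonal vectors are layout-independent) and reuse the factors with
 * gttrs; du2 (length n-2) and ipiv receive the fill-in and pivots.
 * Caller-defined n, nrhs, dl, d, du, du2, ipiv, b:
 *
 *   lapack_int info = LAPACKE_dgttrf_work( n, dl, d, du, du2, ipiv );
 *   if( info == 0 )
 *       info = LAPACKE_dgttrs_work( LAPACK_COL_MAJOR, 'N', n, nrhs,
 *                                   dl, d, du, du2, ipiv, b, n );
 */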
lapack_int LAPACKE_chbev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_float* ab, lapack_int ldab,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zhbev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_double* ab, lapack_int ldab,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chbevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_float* ab, lapack_int ldab,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zhbevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_double* ab, lapack_int ldab,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
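/* Illustrative note: the *evd divide-and-conquer eigensolvers take three
 * scratch arrays; passing lwork = lrwork = liwork = -1 performs a single
 * workspace query that returns the needed sizes in work[0], rwork[0] and
 * iwork[0]. Sketch with LAPACKE_zhbevd_work (kd sub/superdiagonals, band
 * storage with ldab >= kd + 1), caller-defined n, kd, ab, w, z:
 *
 *   lapack_complex_double wq;
 *   double rwq;
 *   lapack_int iwq;
 *   lapack_int info = LAPACKE_zhbevd_work( LAPACK_COL_MAJOR, 'V', 'U', n, kd,
 *                                          ab, kd + 1, w, z, n,
 *                                          &wq, -1, &rwq, -1, &iwq, -1 );
 *   // allocate the three buffers from wq, rwq, iwq and call again
 */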
lapack_int LAPACKE_chbevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int kd,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* q, lapack_int ldq,
float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
float* rwork, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_zhbevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int kd,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* q, lapack_int ldq,
double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
double* rwork, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_chbgst_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* bb, lapack_int ldbb,
lapack_complex_float* x, lapack_int ldx,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zhbgst_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
const lapack_complex_double* bb,
lapack_int ldbb, lapack_complex_double* x,
lapack_int ldx, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chbgv_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* bb, lapack_int ldbb,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zhbgv_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* bb, lapack_int ldbb,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chbgvd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* bb, lapack_int ldbb,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zhbgvd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* bb, lapack_int ldbb,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_chbgvx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int ka,
lapack_int kb, lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* bb,
lapack_int ldbb, lapack_complex_float* q,
lapack_int ldq, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_zhbgvx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int ka,
lapack_int kb, lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* bb,
lapack_int ldbb, lapack_complex_double* q,
lapack_int ldq, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_chbtrd_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_float* ab, lapack_int ldab,
float* d, float* e, lapack_complex_float* q,
lapack_int ldq, lapack_complex_float* work );
lapack_int LAPACKE_zhbtrd_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int kd,
lapack_complex_double* ab, lapack_int ldab,
double* d, double* e, lapack_complex_double* q,
lapack_int ldq, lapack_complex_double* work );
lapack_int LAPACKE_checon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zhecon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond, lapack_complex_double* work );
lapack_int LAPACKE_cheequb_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax,
lapack_complex_float* work );
lapack_int LAPACKE_zheequb_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax,
lapack_complex_double* work );
lapack_int LAPACKE_cheev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float* w,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zheev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double* w,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
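/* Illustrative sketch: zheev computes all eigenvalues (ascending, in w) of
 * a Hermitian matrix, overwriting a with the eigenvectors when jobz == 'V'.
 * rwork has the fixed size max(1, 3*n - 2), so only lwork is queried.
 * Assumes the default C99 complex configuration of lapack_complex_double
 * and a caller-defined n, a:
 *
 *   double* w     = (double*)malloc( n * sizeof(double) );
 *   double* rwork = (double*)malloc( (3 * n - 2) * sizeof(double) );
 *   lapack_complex_double wkopt;
 *   lapack_int info = LAPACKE_zheev_work( LAPACK_COL_MAJOR, 'V', 'U', n,
 *                                         a, n, w, &wkopt, -1, rwork );
 *   lapack_int lwork = (lapack_int)creal( wkopt );   // needs <complex.h>
 *   lapack_complex_double* work =
 *       (lapack_complex_double*)malloc( lwork * sizeof(*work) );
 *   info = LAPACKE_zheev_work( LAPACK_COL_MAJOR, 'V', 'U', n,
 *                              a, n, w, work, lwork, rwork );
 */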
lapack_int LAPACKE_cheevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, float* w,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_zheevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, double* w,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_cheevr_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_int* isuppz,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_zheevr_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_int* isuppz,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_cheevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_zheevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_chegst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhegst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_chegv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb, float* w,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zhegv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double* w, lapack_complex_double* work,
lapack_int lwork, double* rwork );
lapack_int LAPACKE_chegvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
float* w, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zhegvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double* w, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_chegvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_zhegvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_cherfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zherfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_cherfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const float* s, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zherfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* s,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chesv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zhesv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_chesvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
lapack_int lwork, float* rwork );
lapack_int LAPACKE_zhesvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_chesvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zhesvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chetrd_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
float* d, float* e, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zhetrd_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
double* d, double* e,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_chetrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_float* work,
lapack_int lwork );
lapack_int LAPACKE_zhetrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* work,
lapack_int lwork );
lapack_int LAPACKE_chetri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work );
lapack_int LAPACKE_zhetri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work );
lapack_int LAPACKE_chetrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zhetrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
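/*
 * Usage sketch: the factor-once/solve-many pattern for a Hermitian
 * indefinite system. LAPACKE_zhetrf_work computes the Bunch-Kaufman
 * factorization (sized via the usual lwork = -1 query), after which
 * LAPACKE_zhetrs_work can be applied to any number of right-hand sides
 * (and LAPACKE_zhetri_work would invert from the same factors). Assumes
 * the default lapack_complex_double == double _Complex.
 */
#if 0
#include <stdlib.h>
#include <complex.h>

static void hetrf_hetrs_sketch(void)
{
    lapack_int n = 2, info;
    /* Column-major; uplo = 'L' so only the lower triangle is referenced. */
    lapack_complex_double a[4] = { 4.0, 1.0 + 1.0*I, 0.0, -3.0 };
    lapack_int ipiv[2];
    lapack_complex_double wkopt;
    info = LAPACKE_zhetrf_work(LAPACK_COL_MAJOR, 'L', n, a, n, ipiv, &wkopt, -1);
    lapack_int lwork = (lapack_int)creal(wkopt);
    lapack_complex_double *work = malloc(sizeof(*work) * lwork);
    info = LAPACKE_zhetrf_work(LAPACK_COL_MAJOR, 'L', n, a, n, ipiv, work, lwork);
    lapack_complex_double rhs[2] = { 1.0, 0.0 };  /* one RHS; reuse a/ipiv for more */
    info = LAPACKE_zhetrs_work(LAPACK_COL_MAJOR, 'L', n, 1, a, n, ipiv, rhs, n);
    free(work);
}
#endif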
lapack_int LAPACKE_chfrk_work( int matrix_order, char transr, char uplo,
char trans, lapack_int n, lapack_int k,
float alpha, const lapack_complex_float* a,
lapack_int lda, float beta,
lapack_complex_float* c );
lapack_int LAPACKE_zhfrk_work( int matrix_order, char transr, char uplo,
char trans, lapack_int n, lapack_int k,
double alpha, const lapack_complex_double* a,
lapack_int lda, double beta,
lapack_complex_double* c );
lapack_int LAPACKE_shgeqz_work( int matrix_order, char job, char compq,
char compz, lapack_int n, lapack_int ilo,
lapack_int ihi, float* h, lapack_int ldh,
float* t, lapack_int ldt, float* alphar,
float* alphai, float* beta, float* q,
lapack_int ldq, float* z, lapack_int ldz,
float* work, lapack_int lwork );
lapack_int LAPACKE_dhgeqz_work( int matrix_order, char job, char compq,
char compz, lapack_int n, lapack_int ilo,
lapack_int ihi, double* h, lapack_int ldh,
double* t, lapack_int ldt, double* alphar,
double* alphai, double* beta, double* q,
lapack_int ldq, double* z, lapack_int ldz,
double* work, lapack_int lwork );
lapack_int LAPACKE_chgeqz_work( int matrix_order, char job, char compq,
char compz, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_float* h,
lapack_int ldh, lapack_complex_float* t,
lapack_int ldt, lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, lapack_int lwork,
float* rwork );
lapack_int LAPACKE_zhgeqz_work( int matrix_order, char job, char compq,
char compz, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_double* h,
lapack_int ldh, lapack_complex_double* t,
lapack_int ldt, lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_chpcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zhpcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_int* ipiv, double anorm,
double* rcond, lapack_complex_double* work );
lapack_int LAPACKE_chpev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_float* ap, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zhpev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_double* ap,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_chpevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_float* ap,
float* w, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zhpevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_complex_double* ap,
double* w, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_chpevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_float* ap, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_zhpevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n,
lapack_complex_double* ap, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_chpgst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_float* ap,
const lapack_complex_float* bp );
lapack_int LAPACKE_zhpgst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, lapack_complex_double* ap,
const lapack_complex_double* bp );
lapack_int LAPACKE_chpgv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_float* ap,
lapack_complex_float* bp, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zhpgv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_double* ap,
lapack_complex_double* bp, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_chpgvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_float* ap,
lapack_complex_float* bp, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_zhpgvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n,
lapack_complex_double* ap,
lapack_complex_double* bp, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int lrwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_chpgvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_float* ap,
lapack_complex_float* bp, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_zhpgvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n,
lapack_complex_double* ap,
lapack_complex_double* bp, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_chprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zhprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_chpsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhpsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_chpsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
lapack_complex_float* afp, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zhpsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
lapack_complex_double* afp, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_chptrd_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, float* d, float* e,
lapack_complex_float* tau );
lapack_int LAPACKE_zhptrd_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, double* d, double* e,
lapack_complex_double* tau );
lapack_int LAPACKE_chptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, lapack_int* ipiv );
lapack_int LAPACKE_zhptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, lapack_int* ipiv );
lapack_int LAPACKE_chptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap,
const lapack_int* ipiv,
lapack_complex_float* work );
lapack_int LAPACKE_zhptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap,
const lapack_int* ipiv,
lapack_complex_double* work );
lapack_int LAPACKE_chptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zhptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_shsein_work( int matrix_order, char job, char eigsrc,
char initv, lapack_logical* select,
lapack_int n, const float* h, lapack_int ldh,
float* wr, const float* wi, float* vl,
lapack_int ldvl, float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m, float* work,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_dhsein_work( int matrix_order, char job, char eigsrc,
char initv, lapack_logical* select,
lapack_int n, const double* h, lapack_int ldh,
double* wr, const double* wi, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m, double* work,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_chsein_work( int matrix_order, char job, char eigsrc,
char initv, const lapack_logical* select,
lapack_int n, const lapack_complex_float* h,
lapack_int ldh, lapack_complex_float* w,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_float* work, float* rwork,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_zhsein_work( int matrix_order, char job, char eigsrc,
char initv, const lapack_logical* select,
lapack_int n, const lapack_complex_double* h,
lapack_int ldh, lapack_complex_double* w,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_double* work, double* rwork,
lapack_int* ifaill, lapack_int* ifailr );
lapack_int LAPACKE_shseqr_work( int matrix_order, char job, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
float* h, lapack_int ldh, float* wr, float* wi,
float* z, lapack_int ldz, float* work,
lapack_int lwork );
lapack_int LAPACKE_dhseqr_work( int matrix_order, char job, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
double* h, lapack_int ldh, double* wr,
double* wi, double* z, lapack_int ldz,
double* work, lapack_int lwork );
lapack_int LAPACKE_chseqr_work( int matrix_order, char job, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_float* h, lapack_int ldh,
lapack_complex_float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zhseqr_work( int matrix_order, char job, char compz,
lapack_int n, lapack_int ilo, lapack_int ihi,
lapack_complex_double* h, lapack_int ldh,
lapack_complex_double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_complex_double* work, lapack_int lwork );
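/*
 * Usage sketch: LAPACKE_dhseqr_work computes the eigenvalues of an upper
 * Hessenberg matrix (job = 'E', compz = 'N'; the Schur-vector array z is
 * not referenced then, but ldz must still be >= 1). Workspace is sized
 * with the usual lwork = -1 query.
 */
#if 0
#include <stdlib.h>

static void hseqr_sketch(void)
{
    lapack_int n = 3, info;
    double h[9] = { 1.0, 4.0, 0.0,    /* column-major: zero below the  */
                    2.0, 5.0, 7.0,    /* first subdiagonal, i.e. upper */
                    3.0, 6.0, 8.0 };  /* Hessenberg                    */
    double wr[3], wi[3], zdummy[1], wkopt;
    info = LAPACKE_dhseqr_work(LAPACK_COL_MAJOR, 'E', 'N', n, 1, n,
                               h, n, wr, wi, zdummy, 1, &wkopt, -1);
    lapack_int lwork = (lapack_int)wkopt;
    double *work = malloc(sizeof(*work) * lwork);
    info = LAPACKE_dhseqr_work(LAPACK_COL_MAJOR, 'E', 'N', n, 1, n,
                               h, n, wr, wi, zdummy, 1, work, lwork);
    /* Eigenvalue k is wr[k] + i*wi[k]; complex ones come in conjugate pairs. */
    free(work);
}
#endif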
lapack_int LAPACKE_clacgv_work( lapack_int n, lapack_complex_float* x,
lapack_int incx );
lapack_int LAPACKE_zlacgv_work( lapack_int n, lapack_complex_double* x,
lapack_int incx );
lapack_int LAPACKE_slacpy_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, const float* a, lapack_int lda,
float* b, lapack_int ldb );
lapack_int LAPACKE_dlacpy_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, const double* a, lapack_int lda,
double* b, lapack_int ldb );
lapack_int LAPACKE_clacpy_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zlacpy_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_zlag2c_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_float* sa, lapack_int ldsa );
lapack_int LAPACKE_slag2d_work( int matrix_order, lapack_int m, lapack_int n,
const float* sa, lapack_int ldsa, double* a,
lapack_int lda );
lapack_int LAPACKE_dlag2s_work( int matrix_order, lapack_int m, lapack_int n,
const double* a, lapack_int lda, float* sa,
lapack_int ldsa );
lapack_int LAPACKE_clag2z_work( int matrix_order, lapack_int m, lapack_int n,
const lapack_complex_float* sa, lapack_int ldsa,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_slagge_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* d,
float* a, lapack_int lda, lapack_int* iseed,
float* work );
lapack_int LAPACKE_dlagge_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* d,
double* a, lapack_int lda, lapack_int* iseed,
double* work );
lapack_int LAPACKE_clagge_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const float* d,
lapack_complex_float* a, lapack_int lda,
lapack_int* iseed, lapack_complex_float* work );
lapack_int LAPACKE_zlagge_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int kl, lapack_int ku, const double* d,
lapack_complex_double* a, lapack_int lda,
lapack_int* iseed,
lapack_complex_double* work );
lapack_int LAPACKE_claghe_work( int matrix_order, lapack_int n, lapack_int k,
const float* d, lapack_complex_float* a,
lapack_int lda, lapack_int* iseed,
lapack_complex_float* work );
lapack_int LAPACKE_zlaghe_work( int matrix_order, lapack_int n, lapack_int k,
const double* d, lapack_complex_double* a,
lapack_int lda, lapack_int* iseed,
lapack_complex_double* work );
lapack_int LAPACKE_slagsy_work( int matrix_order, lapack_int n, lapack_int k,
const float* d, float* a, lapack_int lda,
lapack_int* iseed, float* work );
lapack_int LAPACKE_dlagsy_work( int matrix_order, lapack_int n, lapack_int k,
const double* d, double* a, lapack_int lda,
lapack_int* iseed, double* work );
lapack_int LAPACKE_clagsy_work( int matrix_order, lapack_int n, lapack_int k,
const float* d, lapack_complex_float* a,
lapack_int lda, lapack_int* iseed,
lapack_complex_float* work );
lapack_int LAPACKE_zlagsy_work( int matrix_order, lapack_int n, lapack_int k,
const double* d, lapack_complex_double* a,
lapack_int lda, lapack_int* iseed,
lapack_complex_double* work );
lapack_int LAPACKE_slapmr_work( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, float* x,
lapack_int ldx, lapack_int* k );
lapack_int LAPACKE_dlapmr_work( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, double* x,
lapack_int ldx, lapack_int* k );
lapack_int LAPACKE_clapmr_work( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n,
lapack_complex_float* x, lapack_int ldx,
lapack_int* k );
lapack_int LAPACKE_zlapmr_work( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n,
lapack_complex_double* x, lapack_int ldx,
lapack_int* k );
lapack_int LAPACKE_slartgp_work( float f, float g, float* cs, float* sn,
float* r );
lapack_int LAPACKE_dlartgp_work( double f, double g, double* cs, double* sn,
double* r );
lapack_int LAPACKE_slartgs_work( float x, float y, float sigma, float* cs,
float* sn );
lapack_int LAPACKE_dlartgs_work( double x, double y, double sigma, double* cs,
double* sn );
float LAPACKE_slapy2_work( float x, float y );
double LAPACKE_dlapy2_work( double x, double y );
float LAPACKE_slapy3_work( float x, float y, float z );
double LAPACKE_dlapy3_work( double x, double y, double z );
float LAPACKE_slamch_work( char cmach );
double LAPACKE_dlamch_work( char cmach );
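/*
 * Usage sketch: these scalar helpers return their result directly and take
 * no workspace. LAPACKE_dlamch_work queries machine parameters ('E' =
 * relative epsilon, 'S' = safe minimum), and LAPACKE_dlapy2_work computes
 * sqrt(x*x + y*y) without intermediate overflow.
 */
#if 0
#include <stdio.h>

static void machine_params_sketch(void)
{
    double eps   = LAPACKE_dlamch_work('E');
    double sfmin = LAPACKE_dlamch_work('S');
    /* Naively squaring these operands would overflow; dlapy2 returns 5.0e300. */
    double r = LAPACKE_dlapy2_work(3.0e300, 4.0e300);
    printf("eps = %g, sfmin = %g, r = %g\n", eps, sfmin, r);
}
#endif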
float LAPACKE_slange_work( int matrix_order, char norm, lapack_int m,
lapack_int n, const float* a, lapack_int lda,
float* work );
double LAPACKE_dlange_work( int matrix_order, char norm, lapack_int m,
lapack_int n, const double* a, lapack_int lda,
double* work );
float LAPACKE_clange_work( int matrix_order, char norm, lapack_int m,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, float* work );
double LAPACKE_zlange_work( int matrix_order, char norm, lapack_int m,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, double* work );
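/*
 * Usage sketch: the norm helpers also return their result directly (note
 * the float/double return types above). For LAPACKE_dlange_work the work
 * array is only referenced for the infinity norm, where it must hold m
 * entries; it is ignored for '1', 'F' and 'M'.
 */
#if 0
static void lange_sketch(void)
{
    lapack_int m = 2, n = 3;
    double a[6] = { 1.0, -4.0,  2.0, 5.0,  -3.0, 6.0 };  /* col-major 2x3 */
    double work[2];  /* length m, used by the 'I' norm only */
    double one_norm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, '1', m, n, a, m, work);
    double inf_norm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, 'I', m, n, a, m, work);
    /* A = [1 2 -3; -4 5 6]: one_norm = 9 (max column abs sum),
       inf_norm = 15 (max row abs sum). */
}
#endif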
float LAPACKE_clanhe_work( int matrix_order, char norm, char uplo,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, float* work );
double LAPACKE_zlanhe_work( int matrix_order, char norm, char uplo,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, double* work );
float LAPACKE_slansy_work( int matrix_order, char norm, char uplo,
lapack_int n, const float* a, lapack_int lda,
float* work );
double LAPACKE_dlansy_work( int matrix_order, char norm, char uplo,
lapack_int n, const double* a, lapack_int lda,
double* work );
float LAPACKE_clansy_work( int matrix_order, char norm, char uplo,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, float* work );
double LAPACKE_zlansy_work( int matrix_order, char norm, char uplo,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, double* work );
float LAPACKE_slantr_work( int matrix_order, char norm, char uplo,
char diag, lapack_int m, lapack_int n, const float* a,
lapack_int lda, float* work );
double LAPACKE_dlantr_work( int matrix_order, char norm, char uplo,
char diag, lapack_int m, lapack_int n,
const double* a, lapack_int lda, double* work );
float LAPACKE_clantr_work( int matrix_order, char norm, char uplo,
char diag, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* work );
double LAPACKE_zlantr_work( int matrix_order, char norm, char uplo,
char diag, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* work );
lapack_int LAPACKE_slarfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, const float* v,
lapack_int ldv, const float* t, lapack_int ldt,
float* c, lapack_int ldc, float* work,
lapack_int ldwork );
lapack_int LAPACKE_dlarfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, const double* v,
lapack_int ldv, const double* t, lapack_int ldt,
double* c, lapack_int ldc, double* work,
lapack_int ldwork );
lapack_int LAPACKE_clarfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int ldwork );
lapack_int LAPACKE_zlarfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work,
lapack_int ldwork );
lapack_int LAPACKE_slarfg_work( lapack_int n, float* alpha, float* x,
lapack_int incx, float* tau );
lapack_int LAPACKE_dlarfg_work( lapack_int n, double* alpha, double* x,
lapack_int incx, double* tau );
lapack_int LAPACKE_clarfg_work( lapack_int n, lapack_complex_float* alpha,
lapack_complex_float* x, lapack_int incx,
lapack_complex_float* tau );
lapack_int LAPACKE_zlarfg_work( lapack_int n, lapack_complex_double* alpha,
lapack_complex_double* x, lapack_int incx,
lapack_complex_double* tau );
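/*
 * Usage sketch: LAPACKE_dlarfg_work generates an elementary Householder
 * reflector H = I - tau*v*v^T of order n such that H*[alpha; x] = [beta; 0].
 * On exit alpha is overwritten by beta and x by the tail of v (v[0] == 1
 * implicitly).
 */
#if 0
static void larfg_sketch(void)
{
    lapack_int n = 3;
    double alpha = 3.0;          /* on exit: beta = -sign(alpha)*||[alpha;x]|| = -5 */
    double x[2] = { 4.0, 0.0 };  /* on exit: v[1], v[2] of the reflector */
    double tau;
    LAPACKE_dlarfg_work(n, &alpha, x, 1, &tau);
    /* Apply H via the larfx/larfb routines above; tau == 0 means H == I. */
}
#endif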
lapack_int LAPACKE_slarft_work( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k, const float* v,
lapack_int ldv, const float* tau, float* t,
lapack_int ldt );
lapack_int LAPACKE_dlarft_work( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k, const double* v,
lapack_int ldv, const double* tau, double* t,
lapack_int ldt );
lapack_int LAPACKE_clarft_work( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* tau,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zlarft_work( int matrix_order, char direct, char storev,
lapack_int n, lapack_int k,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* tau,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_slarfx_work( int matrix_order, char side, lapack_int m,
lapack_int n, const float* v, float tau,
float* c, lapack_int ldc, float* work );
lapack_int LAPACKE_dlarfx_work( int matrix_order, char side, lapack_int m,
lapack_int n, const double* v, double tau,
double* c, lapack_int ldc, double* work );
lapack_int LAPACKE_clarfx_work( int matrix_order, char side, lapack_int m,
lapack_int n, const lapack_complex_float* v,
lapack_complex_float tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work );
lapack_int LAPACKE_zlarfx_work( int matrix_order, char side, lapack_int m,
lapack_int n, const lapack_complex_double* v,
lapack_complex_double tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work );
lapack_int LAPACKE_slarnv_work( lapack_int idist, lapack_int* iseed,
lapack_int n, float* x );
lapack_int LAPACKE_dlarnv_work( lapack_int idist, lapack_int* iseed,
lapack_int n, double* x );
lapack_int LAPACKE_clarnv_work( lapack_int idist, lapack_int* iseed,
lapack_int n, lapack_complex_float* x );
lapack_int LAPACKE_zlarnv_work( lapack_int idist, lapack_int* iseed,
lapack_int n, lapack_complex_double* x );
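/*
 * Usage sketch: LAPACKE_dlarnv_work fills a vector with random numbers
 * (idist 1 = uniform(0,1), 2 = uniform(-1,1), 3 = standard normal). The
 * seed is four integers in [0,4095] with iseed[3] odd, and is advanced on
 * exit so consecutive calls continue the same stream.
 */
#if 0
static void larnv_sketch(void)
{
    lapack_int iseed[4] = { 0, 1, 2, 3 };  /* iseed[3] must be odd */
    double x[5];
    LAPACKE_dlarnv_work(3, iseed, 5, x);   /* five N(0,1) deviates */
    LAPACKE_dlarnv_work(3, iseed, 5, x);   /* next five from the same stream */
}
#endif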
lapack_int LAPACKE_slaset_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, float alpha, float beta, float* a,
lapack_int lda );
lapack_int LAPACKE_dlaset_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, double alpha, double beta,
double* a, lapack_int lda );
lapack_int LAPACKE_claset_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, lapack_complex_float alpha,
lapack_complex_float beta,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zlaset_work( int matrix_order, char uplo, lapack_int m,
lapack_int n, lapack_complex_double alpha,
lapack_complex_double beta,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_slasrt_work( char id, lapack_int n, float* d );
lapack_int LAPACKE_dlasrt_work( char id, lapack_int n, double* d );
lapack_int LAPACKE_slaswp_work( int matrix_order, lapack_int n, float* a,
lapack_int lda, lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_dlaswp_work( int matrix_order, lapack_int n, double* a,
lapack_int lda, lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_claswp_work( int matrix_order, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_zlaswp_work( int matrix_order, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int k1, lapack_int k2,
const lapack_int* ipiv, lapack_int incx );
lapack_int LAPACKE_slatms_work( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym,
float* d, lapack_int mode, float cond,
float dmax, lapack_int kl, lapack_int ku,
char pack, float* a, lapack_int lda,
float* work );
lapack_int LAPACKE_dlatms_work( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym,
double* d, lapack_int mode, double cond,
double dmax, lapack_int kl, lapack_int ku,
char pack, double* a, lapack_int lda,
double* work );
lapack_int LAPACKE_clatms_work( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym,
float* d, lapack_int mode, float cond,
float dmax, lapack_int kl, lapack_int ku,
char pack, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* work );
lapack_int LAPACKE_zlatms_work( int matrix_order, lapack_int m, lapack_int n,
char dist, lapack_int* iseed, char sym,
double* d, lapack_int mode, double cond,
double dmax, lapack_int kl, lapack_int ku,
char pack, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* work );
lapack_int LAPACKE_slauum_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda );
lapack_int LAPACKE_dlauum_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda );
lapack_int LAPACKE_clauum_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zlauum_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_sopgtr_work( int matrix_order, char uplo, lapack_int n,
const float* ap, const float* tau, float* q,
lapack_int ldq, float* work );
lapack_int LAPACKE_dopgtr_work( int matrix_order, char uplo, lapack_int n,
const double* ap, const double* tau, double* q,
lapack_int ldq, double* work );
lapack_int LAPACKE_sopmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const float* ap, const float* tau, float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_dopmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const double* ap, const double* tau, double* c,
lapack_int ldc, double* work );
lapack_int LAPACKE_sorgbr_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, float* a,
lapack_int lda, const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorgbr_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k, double* a,
lapack_int lda, const double* tau, double* work,
lapack_int lwork );
lapack_int LAPACKE_sorghr_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, float* a, lapack_int lda,
const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorghr_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, double* a, lapack_int lda,
const double* tau, double* work,
lapack_int lwork );
lapack_int LAPACKE_sorglq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorglq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau, double* work,
lapack_int lwork );
lapack_int LAPACKE_sorgql_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorgql_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau, double* work,
lapack_int lwork );
lapack_int LAPACKE_sorgqr_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorgqr_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau, double* work,
lapack_int lwork );
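/*
 * Usage sketch: forming the explicit Q factor of a QR factorization. The
 * reflectors come from LAPACKE_dgeqrf_work (declared elsewhere in this
 * header); LAPACKE_dorgqr_work then overwrites them with the leading
 * m-by-n columns of Q. Both routines accept lwork >= n, so the queried
 * geqrf workspace can be reused for orgqr.
 */
#if 0
#include <stdlib.h>

static void orgqr_sketch(void)
{
    lapack_int m = 4, n = 2, info;
    double a[8] = { 1.0, 1.0, 1.0, 1.0,   -1.0, 1.0, 3.0, 5.0 };  /* col-major */
    double tau[2], wkopt;
    info = LAPACKE_dgeqrf_work(LAPACK_COL_MAJOR, m, n, a, m, tau, &wkopt, -1);
    lapack_int lwork = (lapack_int)wkopt;
    double *work = malloc(sizeof(*work) * lwork);
    info = LAPACKE_dgeqrf_work(LAPACK_COL_MAJOR, m, n, a, m, tau, work, lwork);
    info = LAPACKE_dorgqr_work(LAPACK_COL_MAJOR, m, n, n, a, m, tau, work, lwork);
    /* a now holds Q (m-by-n, orthonormal columns). */
    free(work);
}
#endif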
lapack_int LAPACKE_sorgrq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, float* a, lapack_int lda,
const float* tau, float* work,
lapack_int lwork );
lapack_int LAPACKE_dorgrq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, double* a, lapack_int lda,
const double* tau, double* work,
lapack_int lwork );
lapack_int LAPACKE_sorgtr_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda, const float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dorgtr_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, const double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormbr_work( int matrix_order, char vect, char side,
char trans, lapack_int m, lapack_int n,
lapack_int k, const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormbr_work( int matrix_order, char vect, char side,
char trans, lapack_int m, lapack_int n,
lapack_int k, const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormhr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormhr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormlq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormlq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormql_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormql_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormqr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormqr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormrq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormrq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormrz_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormrz_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_sormtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const float* a, lapack_int lda,
const float* tau, float* c, lapack_int ldc,
float* work, lapack_int lwork );
lapack_int LAPACKE_dormtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const double* a, lapack_int lda,
const double* tau, double* c, lapack_int ldc,
double* work, lapack_int lwork );
lapack_int LAPACKE_spbcon_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const float* ab, lapack_int ldab,
float anorm, float* rcond, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dpbcon_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const double* ab,
lapack_int ldab, double anorm, double* rcond,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cpbcon_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_float* ab,
lapack_int ldab, float anorm, float* rcond,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zpbcon_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_double* ab,
lapack_int ldab, double anorm, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_spbequ_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const float* ab, lapack_int ldab,
float* s, float* scond, float* amax );
lapack_int LAPACKE_dpbequ_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const double* ab,
lapack_int ldab, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cpbequ_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_float* ab,
lapack_int ldab, float* s, float* scond,
float* amax );
lapack_int LAPACKE_zpbequ_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, const lapack_complex_double* ab,
lapack_int ldab, double* s, double* scond,
double* amax );
lapack_int LAPACKE_spbrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const float* ab,
lapack_int ldab, const float* afb,
lapack_int ldafb, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dpbrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const double* ab, lapack_int ldab,
const double* afb, lapack_int ldafb,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cpbrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
const lapack_complex_float* afb,
lapack_int ldafb, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zpbrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab,
const lapack_complex_double* afb,
lapack_int ldafb,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_spbstf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kb, float* bb, lapack_int ldbb );
lapack_int LAPACKE_dpbstf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kb, double* bb, lapack_int ldbb );
lapack_int LAPACKE_cpbstf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kb, lapack_complex_float* bb,
lapack_int ldbb );
lapack_int LAPACKE_zpbstf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kb, lapack_complex_double* bb,
lapack_int ldbb );
lapack_int LAPACKE_spbsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, float* ab,
lapack_int ldab, float* b, lapack_int ldb );
lapack_int LAPACKE_dpbsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, double* ab,
lapack_int ldab, double* b, lapack_int ldb );
lapack_int LAPACKE_cpbsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpbsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* b, lapack_int ldb );
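/*
 * Usage sketch: one-shot Cholesky solve of a banded SPD system with
 * LAPACKE_dpbsv_work. Band storage (uplo = 'U', ldab = kd+1, col-major)
 * puts A(i,j) at ab[(kd + i - j) + j*ldab], so row kd holds the diagonal
 * and row kd-1 the superdiagonal; the first kd entries of column 0 are
 * unused.
 */
#if 0
static void pbsv_sketch(void)
{
    lapack_int n = 4, kd = 1, info;
    /* Tridiagonal SPD matrix: 2 on the diagonal, -1 off. */
    double ab[8] = { 0.0, 2.0,  -1.0, 2.0,  -1.0, 2.0,  -1.0, 2.0 };
    double b[4]  = { 1.0, 0.0, 0.0, 1.0 };
    info = LAPACKE_dpbsv_work(LAPACK_COL_MAJOR, 'U', n, kd, 1, ab, kd + 1, b, n);
    /* info == 0: b holds the solution; ab holds the band Cholesky factor. */
}
#endif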
lapack_int LAPACKE_spbsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int kd, lapack_int nrhs,
float* ab, lapack_int ldab, float* afb,
lapack_int ldafb, char* equed, float* s,
float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, float* work, lapack_int* iwork );
lapack_int LAPACKE_dpbsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int kd, lapack_int nrhs,
double* ab, lapack_int ldab, double* afb,
lapack_int ldafb, char* equed, double* s,
double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, double* work, lapack_int* iwork );
lapack_int LAPACKE_cpbsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int kd, lapack_int nrhs,
lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* afb, lapack_int ldafb,
char* equed, float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zpbsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int kd, lapack_int nrhs,
lapack_complex_double* ab, lapack_int ldab,
lapack_complex_double* afb, lapack_int ldafb,
char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_spbtrf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, float* ab, lapack_int ldab );
lapack_int LAPACKE_dpbtrf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, double* ab, lapack_int ldab );
lapack_int LAPACKE_cpbtrf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_complex_float* ab,
lapack_int ldab );
lapack_int LAPACKE_zpbtrf_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_complex_double* ab,
lapack_int ldab );
lapack_int LAPACKE_spbtrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs, const float* ab,
lapack_int ldab, float* b, lapack_int ldb );
lapack_int LAPACKE_dpbtrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const double* ab, lapack_int ldab, double* b,
lapack_int ldb );
lapack_int LAPACKE_cpbtrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_float* ab, lapack_int ldab,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpbtrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int kd, lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_spftrf_work( int matrix_order, char transr, char uplo,
lapack_int n, float* a );
lapack_int LAPACKE_dpftrf_work( int matrix_order, char transr, char uplo,
lapack_int n, double* a );
lapack_int LAPACKE_cpftrf_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_float* a );
lapack_int LAPACKE_zpftrf_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_double* a );
lapack_int LAPACKE_spftri_work( int matrix_order, char transr, char uplo,
lapack_int n, float* a );
lapack_int LAPACKE_dpftri_work( int matrix_order, char transr, char uplo,
lapack_int n, double* a );
lapack_int LAPACKE_cpftri_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_float* a );
lapack_int LAPACKE_zpftri_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_complex_double* a );
lapack_int LAPACKE_spftrs_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs, const float* a,
float* b, lapack_int ldb );
lapack_int LAPACKE_dpftrs_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs, const double* a,
double* b, lapack_int ldb );
lapack_int LAPACKE_cpftrs_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpftrs_work( int matrix_order, char transr, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_spocon_work( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float anorm,
float* rcond, float* work, lapack_int* iwork );
lapack_int LAPACKE_dpocon_work( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double anorm,
double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cpocon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float anorm, float* rcond,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zpocon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double anorm, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_spoequ_work( int matrix_order, lapack_int n, const float* a,
lapack_int lda, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dpoequ_work( int matrix_order, lapack_int n, const double* a,
lapack_int lda, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cpoequ_work( int matrix_order, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zpoequ_work( int matrix_order, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
lapack_int LAPACKE_spoequb_work( int matrix_order, lapack_int n, const float* a,
lapack_int lda, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dpoequb_work( int matrix_order, lapack_int n,
const double* a, lapack_int lda, double* s,
double* scond, double* amax );
lapack_int LAPACKE_cpoequb_work( int matrix_order, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax );
lapack_int LAPACKE_zpoequb_work( int matrix_order, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax );
lapack_int LAPACKE_sporfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dporfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cporfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zporfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sporfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af,
lapack_int ldaf, const float* s,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dporfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const double* s,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cporfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af,
lapack_int ldaf, const float* s,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zporfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af,
lapack_int ldaf, const double* s,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
float* b, lapack_int ldb );
lapack_int LAPACKE_dposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb );
lapack_int LAPACKE_cposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_dsposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
double* b, lapack_int ldb, double* x,
lapack_int ldx, double* work, float* swork,
lapack_int* iter );
lapack_int LAPACKE_zcposv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, lapack_complex_double* work,
lapack_complex_float* swork, double* rwork,
lapack_int* iter );
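/*
 * Usage sketch: mixed-precision solve with LAPACKE_dsposv_work, which
 * factors in single precision and iteratively refines the solution to
 * double. Per the underlying dsposv documentation (an assumption worth
 * re-checking), work must hold n*nrhs doubles and swork n*(n+nrhs) floats;
 * iter >= 0 reports the refinement iterations used, while iter < 0 signals
 * a fallback to a plain double-precision Cholesky solve.
 */
#if 0
#include <stdlib.h>

static void sposv_sketch(void)
{
    lapack_int n = 2, nrhs = 1, iter, info;
    double a[4] = { 4.0, 1.0, 1.0, 3.0 };  /* SPD, col-major */
    double b[2] = { 1.0, 2.0 }, x[2];
    double *work  = malloc(sizeof(double) * n * nrhs);
    float  *swork = malloc(sizeof(float)  * n * (n + nrhs));
    info = LAPACKE_dsposv_work(LAPACK_COL_MAJOR, 'U', n, nrhs,
                               a, n, b, n, x, n, work, swork, &iter);
    free(work); free(swork);
}
#endif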
lapack_int LAPACKE_sposvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
char* equed, float* s, float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dposvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
char* equed, double* s, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cposvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
char* equed, float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zposvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sposvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
char* equed, float* s, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dposvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
char* equed, double* s, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cposvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
char* equed, float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zposvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_spotrf_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda );
lapack_int LAPACKE_dpotrf_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda );
lapack_int LAPACKE_cpotrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zpotrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_spotri_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda );
lapack_int LAPACKE_dpotri_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda );
lapack_int LAPACKE_cpotri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zpotri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_spotrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
float* b, lapack_int ldb );
lapack_int LAPACKE_dpotrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, double* b, lapack_int ldb );
lapack_int LAPACKE_cpotrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zpotrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* b,
lapack_int ldb );
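/*
 * Usage sketch: Cholesky factor-then-solve for a dense SPD system,
 * splitting the driver into LAPACKE_dpotrf_work + LAPACKE_dpotrs_work so
 * the factorization can be reused across right-hand sides. Neither routine
 * takes workspace; info > 0 from potrf means the leading minor of that
 * order is not positive definite.
 */
#if 0
static void potrf_potrs_sketch(void)
{
    lapack_int n = 3, info;
    double a[9] = { 4.0, 2.0, 2.0,    /* col-major; 'L' reads the */
                    0.0, 3.0, 1.0,    /* lower triangle only      */
                    0.0, 0.0, 5.0 };
    double b[3] = { 1.0, 2.0, 3.0 };
    info = LAPACKE_dpotrf_work(LAPACK_COL_MAJOR, 'L', n, a, n);
    if (info == 0)
        info = LAPACKE_dpotrs_work(LAPACK_COL_MAJOR, 'L', n, 1, a, n, b, n);
}
#endif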
lapack_int LAPACKE_sppcon_work( int matrix_order, char uplo, lapack_int n,
const float* ap, float anorm, float* rcond,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dppcon_work( int matrix_order, char uplo, lapack_int n,
const double* ap, double anorm, double* rcond,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cppcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap, float anorm,
float* rcond, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zppcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap, double anorm,
double* rcond, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_sppequ_work( int matrix_order, char uplo, lapack_int n,
const float* ap, float* s, float* scond,
float* amax );
lapack_int LAPACKE_dppequ_work( int matrix_order, char uplo, lapack_int n,
const double* ap, double* s, double* scond,
double* amax );
lapack_int LAPACKE_cppequ_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap, float* s,
float* scond, float* amax );
lapack_int LAPACKE_zppequ_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap, double* s,
double* scond, double* amax );
lapack_int LAPACKE_spprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap,
const float* afp, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dpprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap,
const double* afp, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cpprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zpprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sppsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* ap, float* b,
lapack_int ldb );
lapack_int LAPACKE_dppsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* ap, double* b,
lapack_int ldb );
lapack_int LAPACKE_cppsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zppsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
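/*
 * Illustrative sketch (not part of this header; values invented): solving
 * a 3x3 symmetric positive-definite system A*x = b in upper packed storage
 * with LAPACKE_dppsv_work, which factors A by Cholesky and solves in
 * place. Error handling is elided.
 *
 *   double ap[6] = { 4.0, 1.0, 3.0, 0.0, 1.0, 2.0 };   upper triangle of
 *                                                       A, packed by columns
 *   double b[3]  = { 1.0, 2.0, 3.0 };
 *   lapack_int info = LAPACKE_dppsv_work( LAPACK_COL_MAJOR, 'U', 3, 1,
 *                                         ap, b, 3 );
 *   On return with info == 0, ap holds the Cholesky factor and b the
 *   solution x.
 */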
lapack_int LAPACKE_sppsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* ap,
float* afp, char* equed, float* s, float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dppsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* ap,
double* afp, char* equed, double* s, double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cppsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* ap,
lapack_complex_float* afp, char* equed,
float* s, lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zppsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* ap,
lapack_complex_double* afp, char* equed,
double* s, lapack_complex_double* b,
lapack_int ldb, lapack_complex_double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_spptrf_work( int matrix_order, char uplo, lapack_int n,
float* ap );
lapack_int LAPACKE_dpptrf_work( int matrix_order, char uplo, lapack_int n,
double* ap );
lapack_int LAPACKE_cpptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap );
lapack_int LAPACKE_zpptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap );
lapack_int LAPACKE_spptri_work( int matrix_order, char uplo, lapack_int n,
float* ap );
lapack_int LAPACKE_dpptri_work( int matrix_order, char uplo, lapack_int n,
double* ap );
lapack_int LAPACKE_cpptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap );
lapack_int LAPACKE_zpptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap );
lapack_int LAPACKE_spptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap, float* b,
lapack_int ldb );
lapack_int LAPACKE_dpptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap, double* b,
lapack_int ldb );
lapack_int LAPACKE_cpptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
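/*
 * xPSTRF computes a Cholesky factorization with complete pivoting,
 * P**T * A * P = U**H * U (or L * L**H), intended for positive
 * semidefinite matrices: piv receives the permutation, rank the computed
 * numerical rank, and tol the stopping tolerance (a negative tol selects
 * the default n * eps * max_k A(k,k)).
 */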
lapack_int LAPACKE_spstrf_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda, lapack_int* piv,
lapack_int* rank, float tol, float* work );
lapack_int LAPACKE_dpstrf_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, lapack_int* piv,
lapack_int* rank, double tol, double* work );
lapack_int LAPACKE_cpstrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* piv, lapack_int* rank, float tol,
float* work );
lapack_int LAPACKE_zpstrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* piv, lapack_int* rank, double tol,
double* work );
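/*
 * The xPT* routines handle symmetric/Hermitian positive-definite
 * tridiagonal matrices, represented by the diagonal d (length n) and the
 * off-diagonal e (length n-1); routines that never touch a 2-D array take
 * no matrix_order argument.
 */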
lapack_int LAPACKE_sptcon_work( lapack_int n, const float* d, const float* e,
float anorm, float* rcond, float* work );
lapack_int LAPACKE_dptcon_work( lapack_int n, const double* d, const double* e,
double anorm, double* rcond, double* work );
lapack_int LAPACKE_cptcon_work( lapack_int n, const float* d,
const lapack_complex_float* e, float anorm,
float* rcond, float* work );
lapack_int LAPACKE_zptcon_work( lapack_int n, const double* d,
const lapack_complex_double* e, double anorm,
double* rcond, double* work );
lapack_int LAPACKE_spteqr_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, float* z, lapack_int ldz,
float* work );
lapack_int LAPACKE_dpteqr_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz,
double* work );
lapack_int LAPACKE_cpteqr_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, lapack_complex_float* z,
lapack_int ldz, float* work );
lapack_int LAPACKE_zpteqr_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz, double* work );
lapack_int LAPACKE_sptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,
const float* d, const float* e, const float* df,
const float* ef, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* ferr,
float* berr, float* work );
lapack_int LAPACKE_dptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,
const double* d, const double* e,
const double* df, const double* ef,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr,
double* work );
lapack_int LAPACKE_cptrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e, const float* df,
const lapack_complex_float* ef,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zptrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e,
const double* df,
const lapack_complex_double* ef,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
float* d, float* e, float* b, lapack_int ldb );
lapack_int LAPACKE_dptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
double* d, double* e, double* b,
lapack_int ldb );
lapack_int LAPACKE_cptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
float* d, lapack_complex_float* e,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,
double* d, lapack_complex_double* e,
lapack_complex_double* b, lapack_int ldb );
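/*
 * Illustrative sketch (values invented): solving the positive-definite
 * tridiagonal system with diagonal 2 and off-diagonal -1 using
 * LAPACKE_dptsv_work, which computes an L*D*L**T factorization and solves
 * in place.
 *
 *   double d[3] = { 2.0, 2.0, 2.0 };
 *   double e[2] = { -1.0, -1.0 };
 *   double b[3] = { 1.0, 0.0, 1.0 };
 *   lapack_int info = LAPACKE_dptsv_work( LAPACK_COL_MAJOR, 3, 1,
 *                                         d, e, b, 3 );
 *   On return with info == 0, d and e hold the factorization and b the
 *   solution.
 */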
lapack_int LAPACKE_sptsvx_work( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const float* d, const float* e,
float* df, float* ef, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work );
lapack_int LAPACKE_dptsvx_work( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const double* d,
const double* e, double* df, double* ef,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* ferr,
double* berr, double* work );
lapack_int LAPACKE_cptsvx_work( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e, float* df,
lapack_complex_float* ef,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zptsvx_work( int matrix_order, char fact, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e, double* df,
lapack_complex_double* ef,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_spttrf_work( lapack_int n, float* d, float* e );
lapack_int LAPACKE_dpttrf_work( lapack_int n, double* d, double* e );
lapack_int LAPACKE_cpttrf_work( lapack_int n, float* d,
lapack_complex_float* e );
lapack_int LAPACKE_zpttrf_work( lapack_int n, double* d,
lapack_complex_double* e );
lapack_int LAPACKE_spttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,
const float* d, const float* e, float* b,
lapack_int ldb );
lapack_int LAPACKE_dpttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,
const double* d, const double* e, double* b,
lapack_int ldb );
lapack_int LAPACKE_cpttrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* d,
const lapack_complex_float* e,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpttrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* d,
const lapack_complex_double* e,
lapack_complex_double* b, lapack_int ldb );
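/*
 * The xSB* routines operate on real symmetric band matrices in band
 * storage: ab is ldab-by-n with ldab >= kd+1, and for uplo = 'U' element
 * A(i,j) (0-based, max(0,j-kd) <= i <= j) is stored at
 * ab[kd+i-j + j*ldab] in column-major order. jobz = 'N' requests
 * eigenvalues only, 'V' eigenvectors as well.
 */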
lapack_int LAPACKE_ssbev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd, float* ab,
lapack_int ldab, float* w, float* z,
lapack_int ldz, float* work );
lapack_int LAPACKE_dsbev_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd, double* ab,
lapack_int ldab, double* w, double* z,
lapack_int ldz, double* work );
lapack_int LAPACKE_ssbevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd, float* ab,
lapack_int ldab, float* w, float* z,
lapack_int ldz, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dsbevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int kd, double* ab,
lapack_int ldab, double* w, double* z,
lapack_int ldz, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ssbevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int kd,
float* ab, lapack_int ldab, float* q,
lapack_int ldq, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z,
lapack_int ldz, float* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_dsbevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int kd,
double* ab, lapack_int ldab, double* q,
lapack_int ldq, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w, double* z,
lapack_int ldz, double* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_ssbgst_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
float* ab, lapack_int ldab, const float* bb,
lapack_int ldbb, float* x, lapack_int ldx,
float* work );
lapack_int LAPACKE_dsbgst_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
double* ab, lapack_int ldab, const double* bb,
lapack_int ldbb, double* x, lapack_int ldx,
double* work );
lapack_int LAPACKE_ssbgv_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
float* ab, lapack_int ldab, float* bb,
lapack_int ldbb, float* w, float* z,
lapack_int ldz, float* work );
lapack_int LAPACKE_dsbgv_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
double* ab, lapack_int ldab, double* bb,
lapack_int ldbb, double* w, double* z,
lapack_int ldz, double* work );
lapack_int LAPACKE_ssbgvd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
float* ab, lapack_int ldab, float* bb,
lapack_int ldbb, float* w, float* z,
lapack_int ldz, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dsbgvd_work( int matrix_order, char jobz, char uplo,
lapack_int n, lapack_int ka, lapack_int kb,
double* ab, lapack_int ldab, double* bb,
lapack_int ldbb, double* w, double* z,
lapack_int ldz, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ssbgvx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int ka,
lapack_int kb, float* ab, lapack_int ldab,
float* bb, lapack_int ldbb, float* q,
lapack_int ldq, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z,
lapack_int ldz, float* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_dsbgvx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, lapack_int ka,
lapack_int kb, double* ab, lapack_int ldab,
double* bb, lapack_int ldbb, double* q,
lapack_int ldq, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w, double* z,
lapack_int ldz, double* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_ssbtrd_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int kd, float* ab,
lapack_int ldab, float* d, float* e, float* q,
lapack_int ldq, float* work );
lapack_int LAPACKE_dsbtrd_work( int matrix_order, char vect, char uplo,
lapack_int n, lapack_int kd, double* ab,
lapack_int ldab, double* d, double* e,
double* q, lapack_int ldq, double* work );
lapack_int LAPACKE_ssfrk_work( int matrix_order, char transr, char uplo,
char trans, lapack_int n, lapack_int k,
float alpha, const float* a, lapack_int lda,
float beta, float* c );
lapack_int LAPACKE_dsfrk_work( int matrix_order, char transr, char uplo,
char trans, lapack_int n, lapack_int k,
double alpha, const double* a, lapack_int lda,
double beta, double* c );
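/*
 * xSFRK above performs a symmetric rank-k update on a matrix held in
 * rectangular full packed (RFP) format. The xSP* routines that follow
 * operate on symmetric indefinite matrices in packed storage, factored by
 * the Bunch-Kaufman method as U*D*U**T or L*D*L**T with the pivoting
 * recorded in ipiv.
 */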
lapack_int LAPACKE_sspcon_work( int matrix_order, char uplo, lapack_int n,
const float* ap, const lapack_int* ipiv,
float anorm, float* rcond, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dspcon_work( int matrix_order, char uplo, lapack_int n,
const double* ap, const lapack_int* ipiv,
double anorm, double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_cspcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zspcon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_int* ipiv, double anorm,
double* rcond, lapack_complex_double* work );
lapack_int LAPACKE_sspev_work( int matrix_order, char jobz, char uplo,
lapack_int n, float* ap, float* w, float* z,
lapack_int ldz, float* work );
lapack_int LAPACKE_dspev_work( int matrix_order, char jobz, char uplo,
lapack_int n, double* ap, double* w, double* z,
lapack_int ldz, double* work );
lapack_int LAPACKE_sspevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, float* ap, float* w, float* z,
lapack_int ldz, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dspevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, double* ap, double* w, double* z,
lapack_int ldz, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_sspevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, float* ap, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w, float* z,
lapack_int ldz, float* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_dspevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, double* ap, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
double* z, lapack_int ldz, double* work,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_sspgst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, float* ap, const float* bp );
lapack_int LAPACKE_dspgst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, double* ap, const double* bp );
lapack_int LAPACKE_sspgv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* ap, float* bp,
float* w, float* z, lapack_int ldz,
float* work );
lapack_int LAPACKE_dspgv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* ap, double* bp,
double* w, double* z, lapack_int ldz,
double* work );
lapack_int LAPACKE_sspgvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* ap, float* bp,
float* w, float* z, lapack_int ldz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_dspgvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* ap, double* bp,
double* w, double* z, lapack_int ldz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_sspgvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, float* ap,
float* bp, float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, float* z, lapack_int ldz, float* work,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_dspgvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, double* ap,
double* bp, double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, double* z, lapack_int ldz,
double* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_ssprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap,
const float* afp, const lapack_int* ipiv,
const float* b, lapack_int ldb, float* x,
lapack_int ldx, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dsprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap,
const double* afp, const lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_csprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zsprfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_sspsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* ap, lapack_int* ipiv,
float* b, lapack_int ldb );
lapack_int LAPACKE_dspsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* ap, lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_cspsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* ap,
lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zspsv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* ap,
lapack_int* ipiv, lapack_complex_double* b,
lapack_int ldb );
lapack_int LAPACKE_sspsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, const float* ap,
float* afp, lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dspsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, const double* ap,
double* afp, lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_cspsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
lapack_complex_float* afp, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zspsvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
lapack_complex_double* afp, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_ssptrd_work( int matrix_order, char uplo, lapack_int n,
float* ap, float* d, float* e, float* tau );
lapack_int LAPACKE_dsptrd_work( int matrix_order, char uplo, lapack_int n,
double* ap, double* d, double* e, double* tau );
lapack_int LAPACKE_ssptrf_work( int matrix_order, char uplo, lapack_int n,
float* ap, lapack_int* ipiv );
lapack_int LAPACKE_dsptrf_work( int matrix_order, char uplo, lapack_int n,
double* ap, lapack_int* ipiv );
lapack_int LAPACKE_csptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap, lapack_int* ipiv );
lapack_int LAPACKE_zsptrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap, lapack_int* ipiv );
lapack_int LAPACKE_ssptri_work( int matrix_order, char uplo, lapack_int n,
float* ap, const lapack_int* ipiv,
float* work );
lapack_int LAPACKE_dsptri_work( int matrix_order, char uplo, lapack_int n,
double* ap, const lapack_int* ipiv,
double* work );
lapack_int LAPACKE_csptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* ap,
const lapack_int* ipiv,
lapack_complex_float* work );
lapack_int LAPACKE_zsptri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* ap,
const lapack_int* ipiv,
lapack_complex_double* work );
lapack_int LAPACKE_ssptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* ap,
const lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dsptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* ap,
const lapack_int* ipiv, double* b,
lapack_int ldb );
lapack_int LAPACKE_csptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_zsptrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
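/*
 * Symmetric tridiagonal eigensolvers (xST* below), all consuming the
 * diagonal d and off-diagonal e: stebz computes eigenvalues by bisection,
 * stein eigenvectors by inverse iteration, stedc uses divide and conquer,
 * stegr and stemr the MRRR algorithm, steqr the implicit QL/QR algorithm,
 * and sterf a root-free QL/QR variant for eigenvalues only. The stev*
 * drivers wrap these for the common cases.
 */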
lapack_int LAPACKE_sstebz_work( char range, char order, lapack_int n, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, const float* d, const float* e,
lapack_int* m, lapack_int* nsplit, float* w,
lapack_int* iblock, lapack_int* isplit,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dstebz_work( char range, char order, lapack_int n, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, const double* d, const double* e,
lapack_int* m, lapack_int* nsplit, double* w,
lapack_int* iblock, lapack_int* isplit,
double* work, lapack_int* iwork );
lapack_int LAPACKE_sstedc_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, float* z, lapack_int ldz,
float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dstedc_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_cstedc_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, lapack_complex_float* z,
lapack_int ldz, lapack_complex_float* work,
lapack_int lwork, float* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zstedc_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz, lapack_complex_double* work,
lapack_int lwork, double* rwork,
lapack_int lrwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_sstegr_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w, float* z,
lapack_int ldz, lapack_int* isuppz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_dstegr_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
double* z, lapack_int ldz, lapack_int* isuppz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_cstegr_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_int* isuppz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zstegr_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int* isuppz, double* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_sstein_work( int matrix_order, lapack_int n, const float* d,
const float* e, lapack_int m, const float* w,
const lapack_int* iblock,
const lapack_int* isplit, float* z,
lapack_int ldz, float* work, lapack_int* iwork,
lapack_int* ifailv );
lapack_int LAPACKE_dstein_work( int matrix_order, lapack_int n, const double* d,
const double* e, lapack_int m, const double* w,
const lapack_int* iblock,
const lapack_int* isplit, double* z,
lapack_int ldz, double* work, lapack_int* iwork,
lapack_int* ifailv );
lapack_int LAPACKE_cstein_work( int matrix_order, lapack_int n, const float* d,
const float* e, lapack_int m, const float* w,
const lapack_int* iblock,
const lapack_int* isplit,
lapack_complex_float* z, lapack_int ldz,
float* work, lapack_int* iwork,
lapack_int* ifailv );
lapack_int LAPACKE_zstein_work( int matrix_order, lapack_int n, const double* d,
const double* e, lapack_int m, const double* w,
const lapack_int* iblock,
const lapack_int* isplit,
lapack_complex_double* z, lapack_int ldz,
double* work, lapack_int* iwork,
lapack_int* ifailv );
lapack_int LAPACKE_sstemr_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
lapack_int* m, float* w, float* z,
lapack_int ldz, lapack_int nzc,
lapack_int* isuppz, lapack_logical* tryrac,
float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dstemr_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int nzc,
lapack_int* isuppz, lapack_logical* tryrac,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_cstemr_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
lapack_int* m, float* w,
lapack_complex_float* z, lapack_int ldz,
lapack_int nzc, lapack_int* isuppz,
lapack_logical* tryrac, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_zstemr_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
lapack_int* m, double* w,
lapack_complex_double* z, lapack_int ldz,
lapack_int nzc, lapack_int* isuppz,
lapack_logical* tryrac, double* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_ssteqr_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, float* z, lapack_int ldz,
float* work );
lapack_int LAPACKE_dsteqr_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz,
double* work );
lapack_int LAPACKE_csteqr_work( int matrix_order, char compz, lapack_int n,
float* d, float* e, lapack_complex_float* z,
lapack_int ldz, float* work );
lapack_int LAPACKE_zsteqr_work( int matrix_order, char compz, lapack_int n,
double* d, double* e, lapack_complex_double* z,
lapack_int ldz, double* work );
lapack_int LAPACKE_ssterf_work( lapack_int n, float* d, float* e );
lapack_int LAPACKE_dsterf_work( lapack_int n, double* d, double* e );
lapack_int LAPACKE_sstev_work( int matrix_order, char jobz, lapack_int n,
float* d, float* e, float* z, lapack_int ldz,
float* work );
lapack_int LAPACKE_dstev_work( int matrix_order, char jobz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz,
double* work );
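/*
 * Illustrative sketch (values invented): full eigendecomposition of a 3x3
 * symmetric tridiagonal matrix with LAPACKE_dstev_work. For jobz = 'V' the
 * work array needs max(1, 2*n-2) entries.
 *
 *   double d[3] = { 2.0, 2.0, 2.0 };   overwritten by the eigenvalues,
 *                                      in ascending order
 *   double e[2] = { -1.0, -1.0 };      destroyed on exit
 *   double z[9];                       receives the eigenvectors
 *   double work[4];                    2*n - 2 = 4
 *   lapack_int info = LAPACKE_dstev_work( LAPACK_COL_MAJOR, 'V', 3,
 *                                         d, e, z, 3, work );
 */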
lapack_int LAPACKE_sstevd_work( int matrix_order, char jobz, lapack_int n,
float* d, float* e, float* z, lapack_int ldz,
float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dstevd_work( int matrix_order, char jobz, lapack_int n,
double* d, double* e, double* z, lapack_int ldz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_sstevr_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w, float* z,
lapack_int ldz, lapack_int* isuppz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_dstevr_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
double* z, lapack_int ldz, lapack_int* isuppz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_sstevx_work( int matrix_order, char jobz, char range,
lapack_int n, float* d, float* e, float vl,
float vu, lapack_int il, lapack_int iu,
float abstol, lapack_int* m, float* w, float* z,
lapack_int ldz, float* work, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_dstevx_work( int matrix_order, char jobz, char range,
lapack_int n, double* d, double* e, double vl,
double vu, lapack_int il, lapack_int iu,
double abstol, lapack_int* m, double* w,
double* z, lapack_int ldz, double* work,
lapack_int* iwork, lapack_int* ifail );
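/*
 * Dense symmetric (xSY*) routines: sycon estimates the reciprocal
 * condition number from a sytrf factorization, syequb computes
 * equilibration scalings, and syev/syevd/syevr/syevx solve the standard
 * symmetric eigenproblem via QR, divide and conquer, MRRR, and
 * bisection/inverse iteration, respectively.
 */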
lapack_int LAPACKE_ssycon_work( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda,
const lapack_int* ipiv, float anorm,
float* rcond, float* work, lapack_int* iwork );
lapack_int LAPACKE_dsycon_work( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_csycon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, float anorm,
float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zsycon_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, double anorm,
double* rcond, lapack_complex_double* work );
lapack_int LAPACKE_ssyequb_work( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float* s,
float* scond, float* amax, float* work );
lapack_int LAPACKE_dsyequb_work( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double* s,
double* scond, double* amax, double* work );
lapack_int LAPACKE_csyequb_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* s, float* scond, float* amax,
lapack_complex_float* work );
lapack_int LAPACKE_zsyequb_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* s, double* scond, double* amax,
lapack_complex_double* work );
lapack_int LAPACKE_ssyev_work( int matrix_order, char jobz, char uplo,
lapack_int n, float* a, lapack_int lda, float* w,
float* work, lapack_int lwork );
lapack_int LAPACKE_dsyev_work( int matrix_order, char jobz, char uplo,
lapack_int n, double* a, lapack_int lda,
double* w, double* work, lapack_int lwork );
lapack_int LAPACKE_ssyevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, float* a, lapack_int lda,
float* w, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dsyevd_work( int matrix_order, char jobz, char uplo,
lapack_int n, double* a, lapack_int lda,
double* w, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ssyevr_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, float* a,
lapack_int lda, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z,
lapack_int ldz, lapack_int* isuppz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_dsyevr_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, double* a,
lapack_int lda, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w, double* z,
lapack_int ldz, lapack_int* isuppz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ssyevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, float* a,
lapack_int lda, float vl, float vu,
lapack_int il, lapack_int iu, float abstol,
lapack_int* m, float* w, float* z,
lapack_int ldz, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_dsyevx_work( int matrix_order, char jobz, char range,
char uplo, lapack_int n, double* a,
lapack_int lda, double vl, double vu,
lapack_int il, lapack_int iu, double abstol,
lapack_int* m, double* w, double* z,
lapack_int ldz, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int* ifail );
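/*
 * Illustrative sketch (assumes <stdlib.h> for malloc/free): the _work
 * variants leave workspace allocation to the caller, and passing
 * lwork = -1 performs a workspace query that returns the optimal size in
 * work[0].
 *
 *   double a[9];     3x3 symmetric matrix, column-major, filled elsewhere
 *   double w[3];     receives the eigenvalues
 *   double wkopt;
 *   lapack_int info = LAPACKE_dsyev_work( LAPACK_COL_MAJOR, 'V', 'U', 3,
 *                                         a, 3, w, &wkopt, -1 );
 *   lapack_int lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   info = LAPACKE_dsyev_work( LAPACK_COL_MAJOR, 'V', 'U', 3,
 *                              a, 3, w, work, lwork );
 *   free( work );
 */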
lapack_int LAPACKE_ssygst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, float* a, lapack_int lda,
const float* b, lapack_int ldb );
lapack_int LAPACKE_dsygst_work( int matrix_order, lapack_int itype, char uplo,
lapack_int n, double* a, lapack_int lda,
const double* b, lapack_int ldb );
lapack_int LAPACKE_ssygv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* w, float* work, lapack_int lwork );
lapack_int LAPACKE_dsygv_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* w, double* work, lapack_int lwork );
lapack_int LAPACKE_ssygvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* w, float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dsygvd_work( int matrix_order, lapack_int itype, char jobz,
char uplo, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* w, double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ssygvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float vl, float vu, lapack_int il,
lapack_int iu, float abstol, lapack_int* m,
float* w, float* z, lapack_int ldz, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int* ifail );
lapack_int LAPACKE_dsygvx_work( int matrix_order, lapack_int itype, char jobz,
char range, char uplo, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double vl, double vu, lapack_int il,
lapack_int iu, double abstol, lapack_int* m,
double* w, double* z, lapack_int ldz,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int* ifail );
lapack_int LAPACKE_ssyrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const float* af, lapack_int ldaf,
const lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dsyrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* b, lapack_int ldb, double* x,
lapack_int ldx, double* ferr, double* berr,
double* work, lapack_int* iwork );
lapack_int LAPACKE_csyrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_zsyrfs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_ssyrfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, const float* af,
lapack_int ldaf, const lapack_int* ipiv,
const float* s, const float* b, lapack_int ldb,
float* x, lapack_int ldx, float* rcond,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dsyrfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, const double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* s, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, double* work,
lapack_int* iwork );
lapack_int LAPACKE_csyrfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* af,
lapack_int ldaf, const lapack_int* ipiv,
const float* s, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zsyrfsx_work( int matrix_order, char uplo, char equed,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* af,
lapack_int ldaf, const lapack_int* ipiv,
const double* s,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
lapack_int LAPACKE_ssysv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, float* a, lapack_int lda,
lapack_int* ipiv, float* b, lapack_int ldb,
float* work, lapack_int lwork );
lapack_int LAPACKE_dsysv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, double* a, lapack_int lda,
lapack_int* ipiv, double* b, lapack_int ldb,
double* work, lapack_int lwork );
lapack_int LAPACKE_csysv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_float* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zsysv_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, lapack_complex_double* a,
lapack_int lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_ssysvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, const float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, const float* b,
lapack_int ldb, float* x, lapack_int ldx,
float* rcond, float* ferr, float* berr,
float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dsysvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, const double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, const double* b,
lapack_int ldb, double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_csysvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int ldb, lapack_complex_float* x,
lapack_int ldx, float* rcond, float* ferr,
float* berr, lapack_complex_float* work,
lapack_int lwork, float* rwork );
lapack_int LAPACKE_zsysvx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, lapack_int lwork,
double* rwork );
lapack_int LAPACKE_ssysvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, float* a,
lapack_int lda, float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s,
float* b, lapack_int ldb, float* x,
lapack_int ldx, float* rcond, float* rpvgrw,
float* berr, lapack_int n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int nparams, float* params, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dsysvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs, double* a,
lapack_int lda, double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s,
double* b, lapack_int ldb, double* x,
lapack_int ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int nparams, double* params,
double* work, lapack_int* iwork );
lapack_int LAPACKE_csysvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* x, lapack_int ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int nparams,
float* params, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_zsysvxx_work( int matrix_order, char fact, char uplo,
lapack_int n, lapack_int nrhs,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* af, lapack_int ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* x, lapack_int ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int nparams,
double* params, lapack_complex_double* work,
double* rwork );
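/*
 * Computational routines behind the sysv drivers above: sytrd reduces a
 * symmetric matrix to tridiagonal form for the eigensolvers, sytrf
 * computes the Bunch-Kaufman factorization, and sytri/sytrs invert or
 * solve with that factorization.
 */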
lapack_int LAPACKE_ssytrd_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda, float* d, float* e,
float* tau, float* work, lapack_int lwork );
lapack_int LAPACKE_dsytrd_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, double* d, double* e,
double* tau, double* work, lapack_int lwork );
lapack_int LAPACKE_ssytrf_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda, lapack_int* ipiv,
float* work, lapack_int lwork );
lapack_int LAPACKE_dsytrf_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, lapack_int* ipiv,
double* work, lapack_int lwork );
lapack_int LAPACKE_csytrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_float* work,
lapack_int lwork );
lapack_int LAPACKE_zsytrf_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_int* ipiv, lapack_complex_double* work,
lapack_int lwork );
lapack_int LAPACKE_ssytri_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda,
const lapack_int* ipiv, float* work );
lapack_int LAPACKE_dsytri_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda,
const lapack_int* ipiv, double* work );
lapack_int LAPACKE_csytri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work );
lapack_int LAPACKE_zsytri_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work );
lapack_int LAPACKE_ssytrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const lapack_int* ipiv, float* b,
lapack_int ldb );
lapack_int LAPACKE_dsytrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const lapack_int* ipiv,
double* b, lapack_int ldb );
lapack_int LAPACKE_csytrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zsytrs_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
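/*
 * Triangular band routines (xTB*): tbcon estimates the reciprocal
 * condition number, tbrfs provides forward and backward error bounds for
 * a computed solution, and tbtrs solves op(A)*X = B; kd counts the
 * superdiagonals (uplo = 'U') or subdiagonals (uplo = 'L').
 */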
lapack_int LAPACKE_stbcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, lapack_int kd,
const float* ab, lapack_int ldab, float* rcond,
float* work, lapack_int* iwork );
lapack_int LAPACKE_dtbcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, lapack_int kd,
const double* ab, lapack_int ldab,
double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_ctbcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, lapack_int kd,
const lapack_complex_float* ab, lapack_int ldab,
float* rcond, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_ztbcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, lapack_int kd,
const lapack_complex_double* ab,
lapack_int ldab, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_stbrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const float* ab,
lapack_int ldab, const float* b, lapack_int ldb,
const float* x, lapack_int ldx, float* ferr,
float* berr, float* work, lapack_int* iwork );
lapack_int LAPACKE_dtbrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const double* ab,
lapack_int ldab, const double* b,
lapack_int ldb, const double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_ctbrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const lapack_complex_float* ab,
lapack_int ldab, const lapack_complex_float* b,
lapack_int ldb, const lapack_complex_float* x,
lapack_int ldx, float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztbrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab, const lapack_complex_double* b,
lapack_int ldb, const lapack_complex_double* x,
lapack_int ldx, double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_stbtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const float* ab,
lapack_int ldab, float* b, lapack_int ldb );
lapack_int LAPACKE_dtbtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const double* ab,
lapack_int ldab, double* b, lapack_int ldb );
lapack_int LAPACKE_ctbtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs, const lapack_complex_float* ab,
lapack_int ldab, lapack_complex_float* b,
lapack_int ldb );
lapack_int LAPACKE_ztbtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int kd,
lapack_int nrhs,
const lapack_complex_double* ab,
lapack_int ldab, lapack_complex_double* b,
lapack_int ldb );
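/*
 * The xTF* routines work on triangular matrices in rectangular full
 * packed (RFP) format: tfsm solves a triangular system with multiple
 * right-hand sides, tftri inverts, and tfttp/tfttr convert from RFP to
 * standard packed and full storage, respectively.
 */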
lapack_int LAPACKE_stfsm_work( int matrix_order, char transr, char side,
char uplo, char trans, char diag, lapack_int m,
lapack_int n, float alpha, const float* a,
float* b, lapack_int ldb );
lapack_int LAPACKE_dtfsm_work( int matrix_order, char transr, char side,
char uplo, char trans, char diag, lapack_int m,
lapack_int n, double alpha, const double* a,
double* b, lapack_int ldb );
lapack_int LAPACKE_ctfsm_work( int matrix_order, char transr, char side,
char uplo, char trans, char diag, lapack_int m,
lapack_int n, lapack_complex_float alpha,
const lapack_complex_float* a,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztfsm_work( int matrix_order, char transr, char side,
char uplo, char trans, char diag, lapack_int m,
lapack_int n, lapack_complex_double alpha,
const lapack_complex_double* a,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_stftri_work( int matrix_order, char transr, char uplo,
char diag, lapack_int n, float* a );
lapack_int LAPACKE_dtftri_work( int matrix_order, char transr, char uplo,
char diag, lapack_int n, double* a );
lapack_int LAPACKE_ctftri_work( int matrix_order, char transr, char uplo,
char diag, lapack_int n,
lapack_complex_float* a );
lapack_int LAPACKE_ztftri_work( int matrix_order, char transr, char uplo,
char diag, lapack_int n,
lapack_complex_double* a );
lapack_int LAPACKE_stfttp_work( int matrix_order, char transr, char uplo,
lapack_int n, const float* arf, float* ap );
lapack_int LAPACKE_dtfttp_work( int matrix_order, char transr, char uplo,
lapack_int n, const double* arf, double* ap );
lapack_int LAPACKE_ctfttp_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* arf,
lapack_complex_float* ap );
lapack_int LAPACKE_ztfttp_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* arf,
lapack_complex_double* ap );
lapack_int LAPACKE_stfttr_work( int matrix_order, char transr, char uplo,
lapack_int n, const float* arf, float* a,
lapack_int lda );
lapack_int LAPACKE_dtfttr_work( int matrix_order, char transr, char uplo,
lapack_int n, const double* arf, double* a,
lapack_int lda );
lapack_int LAPACKE_ctfttr_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* arf,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_ztfttr_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* arf,
lapack_complex_double* a, lapack_int lda );
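/*
 * Generalized Schur form routines (xTG*), operating on matrix pairs:
 * tgevc computes generalized eigenvectors, tgexc reorders the
 * factorization, tgsen reorders while estimating condition numbers and
 * deflating subspaces, tgsja performs the Jacobi phase of the generalized
 * SVD, tgsna computes condition numbers for eigenvalues and eigenvectors,
 * and tgsyl solves the generalized Sylvester equation.
 */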
lapack_int LAPACKE_stgevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const float* s, lapack_int lds, const float* p,
lapack_int ldp, float* vl, lapack_int ldvl,
float* vr, lapack_int ldvr, lapack_int mm,
lapack_int* m, float* work );
lapack_int LAPACKE_dtgevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const double* s, lapack_int lds,
const double* p, lapack_int ldp, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m, double* work );
lapack_int LAPACKE_ctgevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* s, lapack_int lds,
const lapack_complex_float* p, lapack_int ldp,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztgevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* s, lapack_int lds,
const lapack_complex_double* p, lapack_int ldp,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_stgexc_work( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n, float* a,
lapack_int lda, float* b, lapack_int ldb,
float* q, lapack_int ldq, float* z,
lapack_int ldz, lapack_int* ifst,
lapack_int* ilst, float* work,
lapack_int lwork );
lapack_int LAPACKE_dtgexc_work( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* q, lapack_int ldq, double* z,
lapack_int ldz, lapack_int* ifst,
lapack_int* ilst, double* work,
lapack_int lwork );
lapack_int LAPACKE_ctgexc_work( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_ztgexc_work( int matrix_order, lapack_logical wantq,
lapack_logical wantz, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_stgsen_work( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
float* a, lapack_int lda, float* b,
lapack_int ldb, float* alphar, float* alphai,
float* beta, float* q, lapack_int ldq, float* z,
lapack_int ldz, lapack_int* m, float* pl,
float* pr, float* dif, float* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_dtgsen_work( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
double* a, lapack_int lda, double* b,
lapack_int ldb, double* alphar, double* alphai,
double* beta, double* q, lapack_int ldq,
double* z, lapack_int ldz, lapack_int* m,
double* pl, double* pr, double* dif,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ctgsen_work( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* alpha,
lapack_complex_float* beta,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* z, lapack_int ldz,
lapack_int* m, float* pl, float* pr, float* dif,
lapack_complex_float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ztgsen_work( int matrix_order, lapack_int ijob,
lapack_logical wantq, lapack_logical wantz,
const lapack_logical* select, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* alpha,
lapack_complex_double* beta,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* z, lapack_int ldz,
lapack_int* m, double* pl, double* pr,
double* dif, lapack_complex_double* work,
lapack_int lwork, lapack_int* iwork,
lapack_int liwork );
lapack_int LAPACKE_stgsja_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_int k, lapack_int l,
float* a, lapack_int lda, float* b,
lapack_int ldb, float tola, float tolb,
float* alpha, float* beta, float* u,
lapack_int ldu, float* v, lapack_int ldv,
float* q, lapack_int ldq, float* work,
lapack_int* ncycle );
lapack_int LAPACKE_dtgsja_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_int k, lapack_int l,
double* a, lapack_int lda, double* b,
lapack_int ldb, double tola, double tolb,
double* alpha, double* beta, double* u,
lapack_int ldu, double* v, lapack_int ldv,
double* q, lapack_int ldq, double* work,
lapack_int* ncycle );
lapack_int LAPACKE_ctgsja_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_int k, lapack_int l,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
float tola, float tolb, float* alpha,
float* beta, lapack_complex_float* u,
lapack_int ldu, lapack_complex_float* v,
lapack_int ldv, lapack_complex_float* q,
lapack_int ldq, lapack_complex_float* work,
lapack_int* ncycle );
lapack_int LAPACKE_ztgsja_work( int matrix_order, char jobu, char jobv,
char jobq, lapack_int m, lapack_int p,
lapack_int n, lapack_int k, lapack_int l,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
double tola, double tolb, double* alpha,
double* beta, lapack_complex_double* u,
lapack_int ldu, lapack_complex_double* v,
lapack_int ldv, lapack_complex_double* q,
lapack_int ldq, lapack_complex_double* work,
lapack_int* ncycle );
lapack_int LAPACKE_stgsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const float* a, lapack_int lda, const float* b,
lapack_int ldb, const float* vl,
lapack_int ldvl, const float* vr,
lapack_int ldvr, float* s, float* dif,
lapack_int mm, lapack_int* m, float* work,
lapack_int lwork, lapack_int* iwork );
lapack_int LAPACKE_dtgsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const double* a, lapack_int lda,
const double* b, lapack_int ldb,
const double* vl, lapack_int ldvl,
const double* vr, lapack_int ldvr, double* s,
double* dif, lapack_int mm, lapack_int* m,
double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_ctgsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* vl, lapack_int ldvl,
const lapack_complex_float* vr, lapack_int ldvr,
float* s, float* dif, lapack_int mm,
lapack_int* m, lapack_complex_float* work,
lapack_int lwork, lapack_int* iwork );
lapack_int LAPACKE_ztgsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* vl,
lapack_int ldvl,
const lapack_complex_double* vr,
lapack_int ldvr, double* s, double* dif,
lapack_int mm, lapack_int* m,
lapack_complex_double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_stgsyl_work( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n, const float* a,
lapack_int lda, const float* b, lapack_int ldb,
float* c, lapack_int ldc, const float* d,
lapack_int ldd, const float* e, lapack_int lde,
float* f, lapack_int ldf, float* scale,
float* dif, float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dtgsyl_work( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n, const double* a,
lapack_int lda, const double* b, lapack_int ldb,
double* c, lapack_int ldc, const double* d,
lapack_int ldd, const double* e, lapack_int lde,
double* f, lapack_int ldf, double* scale,
double* dif, double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_ctgsyl_work( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* c, lapack_int ldc,
const lapack_complex_float* d, lapack_int ldd,
const lapack_complex_float* e, lapack_int lde,
lapack_complex_float* f, lapack_int ldf,
float* scale, float* dif,
lapack_complex_float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_ztgsyl_work( int matrix_order, char trans, lapack_int ijob,
lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* c, lapack_int ldc,
const lapack_complex_double* d, lapack_int ldd,
const lapack_complex_double* e, lapack_int lde,
lapack_complex_double* f, lapack_int ldf,
double* scale, double* dif,
lapack_complex_double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_stpcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, const float* ap,
float* rcond, float* work, lapack_int* iwork );
lapack_int LAPACKE_dtpcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, const double* ap,
double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_ctpcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n,
const lapack_complex_float* ap, float* rcond,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztpcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n,
const lapack_complex_double* ap, double* rcond,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_stprfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const float* ap, const float* b, lapack_int ldb,
const float* x, lapack_int ldx, float* ferr,
float* berr, float* work, lapack_int* iwork );
lapack_int LAPACKE_dtprfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const double* ap, const double* b,
lapack_int ldb, const double* x, lapack_int ldx,
double* ferr, double* berr, double* work,
lapack_int* iwork );
lapack_int LAPACKE_ctprfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztprfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_stptri_work( int matrix_order, char uplo, char diag,
lapack_int n, float* ap );
lapack_int LAPACKE_dtptri_work( int matrix_order, char uplo, char diag,
lapack_int n, double* ap );
lapack_int LAPACKE_ctptri_work( int matrix_order, char uplo, char diag,
lapack_int n, lapack_complex_float* ap );
lapack_int LAPACKE_ztptri_work( int matrix_order, char uplo, char diag,
lapack_int n, lapack_complex_double* ap );
lapack_int LAPACKE_stptrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const float* ap, float* b, lapack_int ldb );
lapack_int LAPACKE_dtptrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const double* ap, double* b, lapack_int ldb );
lapack_int LAPACKE_ctptrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_float* ap,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztptrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_double* ap,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_stpttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const float* ap, float* arf );
lapack_int LAPACKE_dtpttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const double* ap, double* arf );
lapack_int LAPACKE_ctpttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* ap,
lapack_complex_float* arf );
lapack_int LAPACKE_ztpttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* ap,
lapack_complex_double* arf );
lapack_int LAPACKE_stpttr_work( int matrix_order, char uplo, lapack_int n,
const float* ap, float* a, lapack_int lda );
lapack_int LAPACKE_dtpttr_work( int matrix_order, char uplo, lapack_int n,
const double* ap, double* a, lapack_int lda );
lapack_int LAPACKE_ctpttr_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_ztpttr_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_strcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, const float* a,
lapack_int lda, float* rcond, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dtrcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n, const double* a,
lapack_int lda, double* rcond, double* work,
lapack_int* iwork );
lapack_int LAPACKE_ctrcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
float* rcond, lapack_complex_float* work,
float* rwork );
lapack_int LAPACKE_ztrcon_work( int matrix_order, char norm, char uplo,
char diag, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
double* rcond, lapack_complex_double* work,
double* rwork );
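/* Usage sketch (illustrative comment, not part of the declared API): estimate
 * the reciprocal 1-norm condition number of an upper-triangular matrix with
 * LAPACKE_dtrcon_work.  The workspace sizes (3*n doubles, n integers) follow
 * the underlying Fortran dtrcon routine; LAPACK_ROW_MAJOR is assumed to be
 * defined earlier in this header.
 *
 *     double a[9] = { 4.0, 1.0, 2.0,
 *                     0.0, 3.0, 1.0,
 *                     0.0, 0.0, 2.0 };        // 3x3 upper triangular
 *     double rcond, work[3 * 3];
 *     lapack_int iwork[3];
 *     lapack_int info = LAPACKE_dtrcon_work( LAPACK_ROW_MAJOR, '1', 'U', 'N',
 *                                            3, a, 3, &rcond, work, iwork );
 *     // info == 0 on success; a small rcond flags ill-conditioning
 */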
lapack_int LAPACKE_strevc_work( int matrix_order, char side, char howmny,
lapack_logical* select, lapack_int n,
const float* t, lapack_int ldt, float* vl,
lapack_int ldvl, float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m, float* work );
lapack_int LAPACKE_dtrevc_work( int matrix_order, char side, char howmny,
lapack_logical* select, lapack_int n,
const double* t, lapack_int ldt, double* vl,
lapack_int ldvl, double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m, double* work );
lapack_int LAPACKE_ctrevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* vl, lapack_int ldvl,
lapack_complex_float* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztrevc_work( int matrix_order, char side, char howmny,
const lapack_logical* select, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* vl, lapack_int ldvl,
lapack_complex_double* vr, lapack_int ldvr,
lapack_int mm, lapack_int* m,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_strexc_work( int matrix_order, char compq, lapack_int n,
float* t, lapack_int ldt, float* q,
lapack_int ldq, lapack_int* ifst,
lapack_int* ilst, float* work );
lapack_int LAPACKE_dtrexc_work( int matrix_order, char compq, lapack_int n,
double* t, lapack_int ldt, double* q,
lapack_int ldq, lapack_int* ifst,
lapack_int* ilst, double* work );
lapack_int LAPACKE_ctrexc_work( int matrix_order, char compq, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* q, lapack_int ldq,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_ztrexc_work( int matrix_order, char compq, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* q, lapack_int ldq,
lapack_int ifst, lapack_int ilst );
lapack_int LAPACKE_strrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const float* a, lapack_int lda, const float* b,
lapack_int ldb, const float* x, lapack_int ldx,
float* ferr, float* berr, float* work,
lapack_int* iwork );
lapack_int LAPACKE_dtrrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const double* a, lapack_int lda,
const double* b, lapack_int ldb,
const double* x, lapack_int ldx, double* ferr,
double* berr, double* work, lapack_int* iwork );
lapack_int LAPACKE_ctrrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
const lapack_complex_float* x, lapack_int ldx,
float* ferr, float* berr,
lapack_complex_float* work, float* rwork );
lapack_int LAPACKE_ztrrfs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
const lapack_complex_double* x, lapack_int ldx,
double* ferr, double* berr,
lapack_complex_double* work, double* rwork );
lapack_int LAPACKE_strsen_work( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
float* t, lapack_int ldt, float* q,
lapack_int ldq, float* wr, float* wi,
lapack_int* m, float* s, float* sep,
float* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_dtrsen_work( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
double* t, lapack_int ldt, double* q,
lapack_int ldq, double* wr, double* wi,
lapack_int* m, double* s, double* sep,
double* work, lapack_int lwork,
lapack_int* iwork, lapack_int liwork );
lapack_int LAPACKE_ctrsen_work( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* w, lapack_int* m,
float* s, float* sep,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_ztrsen_work( int matrix_order, char job, char compq,
const lapack_logical* select, lapack_int n,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* w, lapack_int* m,
double* s, double* sep,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_strsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const float* t, lapack_int ldt, const float* vl,
lapack_int ldvl, const float* vr,
lapack_int ldvr, float* s, float* sep,
lapack_int mm, lapack_int* m, float* work,
lapack_int ldwork, lapack_int* iwork );
lapack_int LAPACKE_dtrsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const double* t, lapack_int ldt,
const double* vl, lapack_int ldvl,
const double* vr, lapack_int ldvr, double* s,
double* sep, lapack_int mm, lapack_int* m,
double* work, lapack_int ldwork,
lapack_int* iwork );
lapack_int LAPACKE_ctrsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_float* t, lapack_int ldt,
const lapack_complex_float* vl, lapack_int ldvl,
const lapack_complex_float* vr, lapack_int ldvr,
float* s, float* sep, lapack_int mm,
lapack_int* m, lapack_complex_float* work,
lapack_int ldwork, float* rwork );
lapack_int LAPACKE_ztrsna_work( int matrix_order, char job, char howmny,
const lapack_logical* select, lapack_int n,
const lapack_complex_double* t, lapack_int ldt,
const lapack_complex_double* vl,
lapack_int ldvl,
const lapack_complex_double* vr,
lapack_int ldvr, double* s, double* sep,
lapack_int mm, lapack_int* m,
lapack_complex_double* work, lapack_int ldwork,
double* rwork );
lapack_int LAPACKE_strsyl_work( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const float* a, lapack_int lda, const float* b,
lapack_int ldb, float* c, lapack_int ldc,
float* scale );
lapack_int LAPACKE_dtrsyl_work( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const double* a, lapack_int lda,
const double* b, lapack_int ldb, double* c,
lapack_int ldc, double* scale );
lapack_int LAPACKE_ctrsyl_work( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* c, lapack_int ldc,
float* scale );
lapack_int LAPACKE_ztrsyl_work( int matrix_order, char trana, char tranb,
lapack_int isgn, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* c, lapack_int ldc,
double* scale );
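/* Usage sketch (illustrative comment, not part of the declared API): solve the
 * Sylvester equation A*X + X*B = scale*C with LAPACKE_dtrsyl_work.  A and B
 * must already be in (quasi-)upper-triangular Schur form; C is overwritten by
 * the solution X and scale is set by the routine to avoid overflow.
 *
 *     double a[4] = { 2.0, 1.0,
 *                     0.0, 3.0 };
 *     double b[4] = { 1.0, 0.5,
 *                     0.0, 4.0 };
 *     double c[4] = { 1.0, 2.0,
 *                     3.0, 4.0 };             // overwritten with X
 *     double scale;
 *     lapack_int info = LAPACKE_dtrsyl_work( LAPACK_ROW_MAJOR, 'N', 'N', 1,
 *                                            2, 2, a, 2, b, 2, c, 2, &scale );
 */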
lapack_int LAPACKE_strtri_work( int matrix_order, char uplo, char diag,
lapack_int n, float* a, lapack_int lda );
lapack_int LAPACKE_dtrtri_work( int matrix_order, char uplo, char diag,
lapack_int n, double* a, lapack_int lda );
lapack_int LAPACKE_ctrtri_work( int matrix_order, char uplo, char diag,
lapack_int n, lapack_complex_float* a,
lapack_int lda );
lapack_int LAPACKE_ztrtri_work( int matrix_order, char uplo, char diag,
lapack_int n, lapack_complex_double* a,
lapack_int lda );
lapack_int LAPACKE_strtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const float* a, lapack_int lda, float* b,
lapack_int ldb );
lapack_int LAPACKE_dtrtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const double* a, lapack_int lda, double* b,
lapack_int ldb );
lapack_int LAPACKE_ctrtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztrtrs_work( int matrix_order, char uplo, char trans,
char diag, lapack_int n, lapack_int nrhs,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb );
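/* Usage sketch (illustrative comment, not part of the declared API): solve an
 * upper-triangular system a*x = b in place with LAPACKE_dtrtrs_work; this
 * routine needs no workspace.
 *
 *     double a[4] = { 2.0, 1.0,
 *                     0.0, 4.0 };             // upper triangular, row-major
 *     double b[2] = { 5.0, 8.0 };             // one right-hand side
 *     lapack_int info = LAPACKE_dtrtrs_work( LAPACK_ROW_MAJOR, 'U', 'N', 'N',
 *                                            2, 1, a, 2, b, 1 );
 *     // on success b holds the solution x = { 1.5, 2.0 }
 */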
lapack_int LAPACKE_strttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const float* a, lapack_int lda,
float* arf );
lapack_int LAPACKE_dtrttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const double* a, lapack_int lda,
double* arf );
lapack_int LAPACKE_ctrttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_float* a,
lapack_int lda, lapack_complex_float* arf );
lapack_int LAPACKE_ztrttf_work( int matrix_order, char transr, char uplo,
lapack_int n, const lapack_complex_double* a,
lapack_int lda, lapack_complex_double* arf );
lapack_int LAPACKE_strttp_work( int matrix_order, char uplo, lapack_int n,
const float* a, lapack_int lda, float* ap );
lapack_int LAPACKE_dtrttp_work( int matrix_order, char uplo, lapack_int n,
const double* a, lapack_int lda, double* ap );
lapack_int LAPACKE_ctrttp_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
lapack_complex_float* ap );
lapack_int LAPACKE_ztrttp_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
lapack_complex_double* ap );
lapack_int LAPACKE_stzrzf_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* tau,
float* work, lapack_int lwork );
lapack_int LAPACKE_dtzrzf_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* tau,
double* work, lapack_int lwork );
lapack_int LAPACKE_ctzrzf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_ztzrzf_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
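/* Usage sketch (illustrative comment, not part of the declared API): RZ-factor
 * a 2x4 upper-trapezoidal matrix with LAPACKE_dtzrzf_work, using the usual
 * LAPACK lwork == -1 workspace query first (malloc/free assume <stdlib.h>).
 *
 *     double a[8] = { 1.0, 2.0, 3.0, 4.0,
 *                     0.0, 5.0, 6.0, 7.0 };
 *     double tau[2], wkopt;
 *     lapack_int info = LAPACKE_dtzrzf_work( LAPACK_ROW_MAJOR, 2, 4, a, 4,
 *                                            tau, &wkopt, -1 );  // size query
 *     lapack_int lwork = (lapack_int)wkopt;
 *     double* work = (double*)malloc( (size_t)lwork * sizeof(double) );
 *     info = LAPACKE_dtzrzf_work( LAPACK_ROW_MAJOR, 2, 4, a, 4,
 *                                 tau, work, lwork );
 *     free( work );
 */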
lapack_int LAPACKE_cungbr_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k,
lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zungbr_work( int matrix_order, char vect, lapack_int m,
lapack_int n, lapack_int k,
lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunghr_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunghr_work( int matrix_order, lapack_int n, lapack_int ilo,
lapack_int ihi, lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunglq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunglq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cungql_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zungql_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cungqr_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zungqr_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cungrq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zungrq_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int k, lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cungtr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zungtr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmbr_work( int matrix_order, char vect, char side,
char trans, lapack_int m, lapack_int n,
lapack_int k, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmbr_work( int matrix_order, char vect, char side,
char trans, lapack_int m, lapack_int n,
lapack_int k, const lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmhr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmhr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int ilo,
lapack_int ihi, const lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmlq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmlq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmql_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmql_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmqr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmqr_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmrq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmrq_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmrz_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const lapack_complex_float* a,
lapack_int lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmrz_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, const lapack_complex_double* a,
lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cunmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const lapack_complex_float* a, lapack_int lda,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_zunmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const lapack_complex_double* a, lapack_int lda,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_cupgtr_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_float* ap,
const lapack_complex_float* tau,
lapack_complex_float* q, lapack_int ldq,
lapack_complex_float* work );
lapack_int LAPACKE_zupgtr_work( int matrix_order, char uplo, lapack_int n,
const lapack_complex_double* ap,
const lapack_complex_double* tau,
lapack_complex_double* q, lapack_int ldq,
lapack_complex_double* work );
lapack_int LAPACKE_cupmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const lapack_complex_float* ap,
const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int ldc,
lapack_complex_float* work );
lapack_int LAPACKE_zupmtr_work( int matrix_order, char side, char uplo,
char trans, lapack_int m, lapack_int n,
const lapack_complex_double* ap,
const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int ldc,
lapack_complex_double* work );
lapack_int LAPACKE_claghe( int matrix_order, lapack_int n, lapack_int k,
const float* d, lapack_complex_float* a,
lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_zlaghe( int matrix_order, lapack_int n, lapack_int k,
const double* d, lapack_complex_double* a,
lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_slagsy( int matrix_order, lapack_int n, lapack_int k,
const float* d, float* a, lapack_int lda,
lapack_int* iseed );
lapack_int LAPACKE_dlagsy( int matrix_order, lapack_int n, lapack_int k,
const double* d, double* a, lapack_int lda,
lapack_int* iseed );
lapack_int LAPACKE_clagsy( int matrix_order, lapack_int n, lapack_int k,
const float* d, lapack_complex_float* a,
lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_zlagsy( int matrix_order, lapack_int n, lapack_int k,
const double* d, lapack_complex_double* a,
lapack_int lda, lapack_int* iseed );
lapack_int LAPACKE_slapmr( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, float* x, lapack_int ldx,
lapack_int* k );
lapack_int LAPACKE_dlapmr( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, double* x,
lapack_int ldx, lapack_int* k );
lapack_int LAPACKE_clapmr( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, lapack_complex_float* x,
lapack_int ldx, lapack_int* k );
lapack_int LAPACKE_zlapmr( int matrix_order, lapack_logical forwrd,
lapack_int m, lapack_int n, lapack_complex_double* x,
lapack_int ldx, lapack_int* k );
float LAPACKE_slapy2( float x, float y );
double LAPACKE_dlapy2( double x, double y );
float LAPACKE_slapy3( float x, float y, float z );
double LAPACKE_dlapy3( double x, double y, double z );
lapack_int LAPACKE_slartgp( float f, float g, float* cs, float* sn, float* r );
lapack_int LAPACKE_dlartgp( double f, double g, double* cs, double* sn,
double* r );
lapack_int LAPACKE_slartgs( float x, float y, float sigma, float* cs,
float* sn );
lapack_int LAPACKE_dlartgs( double x, double y, double sigma, double* cs,
double* sn );
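/* Usage sketch (illustrative comment, not part of the declared API): the
 * scalar helpers above need no workspace.  LAPACKE_dlapy2 returns
 * sqrt(x*x + y*y) without intermediate overflow, and LAPACKE_dlartgp builds a
 * plane rotation with a non-negative r.
 *
 *     double h = LAPACKE_dlapy2( 3.0, 4.0 );      // h == 5.0
 *     double cs, sn, r;
 *     LAPACKE_dlartgp( 3.0, 4.0, &cs, &sn, &r );  // cs == 0.6, sn == 0.8, r == 5.0
 */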
//LAPACK 3.3.0
lapack_int LAPACKE_cbbcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, lapack_int m,
lapack_int p, lapack_int q, float* theta, float* phi,
lapack_complex_float* u1, lapack_int ldu1,
lapack_complex_float* u2, lapack_int ldu2,
lapack_complex_float* v1t, lapack_int ldv1t,
lapack_complex_float* v2t, lapack_int ldv2t,
float* b11d, float* b11e, float* b12d, float* b12e,
float* b21d, float* b21e, float* b22d, float* b22e );
lapack_int LAPACKE_cbbcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
lapack_int m, lapack_int p, lapack_int q,
float* theta, float* phi,
lapack_complex_float* u1, lapack_int ldu1,
lapack_complex_float* u2, lapack_int ldu2,
lapack_complex_float* v1t, lapack_int ldv1t,
lapack_complex_float* v2t, lapack_int ldv2t,
float* b11d, float* b11e, float* b12d,
float* b12e, float* b21d, float* b21e,
float* b22d, float* b22e, float* rwork,
lapack_int lrwork );
lapack_int LAPACKE_cheswapr( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_cheswapr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_chetri2( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_chetri2_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_chetri2x( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, lapack_int nb );
lapack_int LAPACKE_chetri2x_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work, lapack_int nb );
lapack_int LAPACKE_chetrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_chetrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work );
lapack_int LAPACKE_csyconv( int matrix_order, char uplo, char way, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_csyconv_work( int matrix_order, char uplo, char way,
lapack_int n, lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* work );
lapack_int LAPACKE_csyswapr( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_csyswapr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_csytri2( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_csytri2_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_csytri2x( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv, lapack_int nb );
lapack_int LAPACKE_csytri2x_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_float* work, lapack_int nb );
lapack_int LAPACKE_csytrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_csytrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_float* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work );
lapack_int LAPACKE_cunbdb( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_float* x11, lapack_int ldx11,
lapack_complex_float* x12, lapack_int ldx12,
lapack_complex_float* x21, lapack_int ldx21,
lapack_complex_float* x22, lapack_int ldx22,
float* theta, float* phi,
lapack_complex_float* taup1,
lapack_complex_float* taup2,
lapack_complex_float* tauq1,
lapack_complex_float* tauq2 );
lapack_int LAPACKE_cunbdb_work( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_float* x11, lapack_int ldx11,
lapack_complex_float* x12, lapack_int ldx12,
lapack_complex_float* x21, lapack_int ldx21,
lapack_complex_float* x22, lapack_int ldx22,
float* theta, float* phi,
lapack_complex_float* taup1,
lapack_complex_float* taup2,
lapack_complex_float* tauq1,
lapack_complex_float* tauq2,
lapack_complex_float* work, lapack_int lwork );
lapack_int LAPACKE_cuncsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_float* x11, lapack_int ldx11,
lapack_complex_float* x12, lapack_int ldx12,
lapack_complex_float* x21, lapack_int ldx21,
lapack_complex_float* x22, lapack_int ldx22,
float* theta, lapack_complex_float* u1,
lapack_int ldu1, lapack_complex_float* u2,
lapack_int ldu2, lapack_complex_float* v1t,
lapack_int ldv1t, lapack_complex_float* v2t,
lapack_int ldv2t );
lapack_int LAPACKE_cuncsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
char signs, lapack_int m, lapack_int p,
lapack_int q, lapack_complex_float* x11,
lapack_int ldx11, lapack_complex_float* x12,
lapack_int ldx12, lapack_complex_float* x21,
lapack_int ldx21, lapack_complex_float* x22,
lapack_int ldx22, float* theta,
lapack_complex_float* u1, lapack_int ldu1,
lapack_complex_float* u2, lapack_int ldu2,
lapack_complex_float* v1t, lapack_int ldv1t,
lapack_complex_float* v2t, lapack_int ldv2t,
lapack_complex_float* work, lapack_int lwork,
float* rwork, lapack_int lrwork,
lapack_int* iwork );
lapack_int LAPACKE_dbbcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, lapack_int m,
lapack_int p, lapack_int q, double* theta,
double* phi, double* u1, lapack_int ldu1, double* u2,
lapack_int ldu2, double* v1t, lapack_int ldv1t,
double* v2t, lapack_int ldv2t, double* b11d,
double* b11e, double* b12d, double* b12e,
double* b21d, double* b21e, double* b22d,
double* b22e );
lapack_int LAPACKE_dbbcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
lapack_int m, lapack_int p, lapack_int q,
double* theta, double* phi, double* u1,
lapack_int ldu1, double* u2, lapack_int ldu2,
double* v1t, lapack_int ldv1t, double* v2t,
lapack_int ldv2t, double* b11d, double* b11e,
double* b12d, double* b12e, double* b21d,
double* b21e, double* b22d, double* b22e,
double* work, lapack_int lwork );
lapack_int LAPACKE_dorbdb( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
double* x11, lapack_int ldx11, double* x12,
lapack_int ldx12, double* x21, lapack_int ldx21,
double* x22, lapack_int ldx22, double* theta,
double* phi, double* taup1, double* taup2,
double* tauq1, double* tauq2 );
lapack_int LAPACKE_dorbdb_work( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
double* x11, lapack_int ldx11, double* x12,
lapack_int ldx12, double* x21, lapack_int ldx21,
double* x22, lapack_int ldx22, double* theta,
double* phi, double* taup1, double* taup2,
double* tauq1, double* tauq2, double* work,
lapack_int lwork );
lapack_int LAPACKE_dorcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
double* x11, lapack_int ldx11, double* x12,
lapack_int ldx12, double* x21, lapack_int ldx21,
double* x22, lapack_int ldx22, double* theta,
double* u1, lapack_int ldu1, double* u2,
lapack_int ldu2, double* v1t, lapack_int ldv1t,
double* v2t, lapack_int ldv2t );
lapack_int LAPACKE_dorcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
char signs, lapack_int m, lapack_int p,
lapack_int q, double* x11, lapack_int ldx11,
double* x12, lapack_int ldx12, double* x21,
lapack_int ldx21, double* x22, lapack_int ldx22,
double* theta, double* u1, lapack_int ldu1,
double* u2, lapack_int ldu2, double* v1t,
lapack_int ldv1t, double* v2t, lapack_int ldv2t,
double* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_dsyconv( int matrix_order, char uplo, char way, lapack_int n,
double* a, lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_dsyconv_work( int matrix_order, char uplo, char way,
lapack_int n, double* a, lapack_int lda,
const lapack_int* ipiv, double* work );
lapack_int LAPACKE_dsyswapr( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int i1, lapack_int i2 );
lapack_int LAPACKE_dsyswapr_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int i1, lapack_int i2 );
lapack_int LAPACKE_dsytri2( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_dsytri2_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda,
const lapack_int* ipiv,
                                 double* work, lapack_int lwork );
lapack_int LAPACKE_dsytri2x( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda, const lapack_int* ipiv,
lapack_int nb );
lapack_int LAPACKE_dsytri2x_work( int matrix_order, char uplo, lapack_int n,
double* a, lapack_int lda,
const lapack_int* ipiv, double* work,
lapack_int nb );
lapack_int LAPACKE_dsytrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a, lapack_int lda,
const lapack_int* ipiv, double* b, lapack_int ldb );
lapack_int LAPACKE_dsytrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const double* a,
lapack_int lda, const lapack_int* ipiv,
double* b, lapack_int ldb, double* work );
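/* Usage sketch (illustrative comment, not part of the declared API): factor a
 * symmetric matrix with LAPACKE_dsytrf (declared earlier in this header) and
 * solve with LAPACKE_dsytrs2_work, whose workspace needs n doubles.
 *
 *     double a[4] = { 4.0, 1.0,
 *                     1.0, 3.0 };             // symmetric, upper part used
 *     double b[2] = { 1.0, 2.0 };
 *     lapack_int ipiv[2];
 *     double work[2];
 *     lapack_int info = LAPACKE_dsytrf( LAPACK_ROW_MAJOR, 'U', 2, a, 2, ipiv );
 *     if( info == 0 )
 *         info = LAPACKE_dsytrs2_work( LAPACK_ROW_MAJOR, 'U', 2, 1,
 *                                      a, 2, ipiv, b, 1, work );
 */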
lapack_int LAPACKE_sbbcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, lapack_int m,
lapack_int p, lapack_int q, float* theta, float* phi,
float* u1, lapack_int ldu1, float* u2,
lapack_int ldu2, float* v1t, lapack_int ldv1t,
float* v2t, lapack_int ldv2t, float* b11d,
float* b11e, float* b12d, float* b12e, float* b21d,
float* b21e, float* b22d, float* b22e );
lapack_int LAPACKE_sbbcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
lapack_int m, lapack_int p, lapack_int q,
float* theta, float* phi, float* u1,
lapack_int ldu1, float* u2, lapack_int ldu2,
float* v1t, lapack_int ldv1t, float* v2t,
lapack_int ldv2t, float* b11d, float* b11e,
float* b12d, float* b12e, float* b21d,
float* b21e, float* b22d, float* b22e,
float* work, lapack_int lwork );
lapack_int LAPACKE_sorbdb( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q, float* x11,
lapack_int ldx11, float* x12, lapack_int ldx12,
float* x21, lapack_int ldx21, float* x22,
lapack_int ldx22, float* theta, float* phi,
float* taup1, float* taup2, float* tauq1,
float* tauq2 );
lapack_int LAPACKE_sorbdb_work( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
float* x11, lapack_int ldx11, float* x12,
lapack_int ldx12, float* x21, lapack_int ldx21,
float* x22, lapack_int ldx22, float* theta,
float* phi, float* taup1, float* taup2,
float* tauq1, float* tauq2, float* work,
lapack_int lwork );
lapack_int LAPACKE_sorcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q, float* x11,
lapack_int ldx11, float* x12, lapack_int ldx12,
float* x21, lapack_int ldx21, float* x22,
lapack_int ldx22, float* theta, float* u1,
lapack_int ldu1, float* u2, lapack_int ldu2,
float* v1t, lapack_int ldv1t, float* v2t,
lapack_int ldv2t );
lapack_int LAPACKE_sorcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
char signs, lapack_int m, lapack_int p,
lapack_int q, float* x11, lapack_int ldx11,
float* x12, lapack_int ldx12, float* x21,
lapack_int ldx21, float* x22, lapack_int ldx22,
float* theta, float* u1, lapack_int ldu1,
float* u2, lapack_int ldu2, float* v1t,
lapack_int ldv1t, float* v2t, lapack_int ldv2t,
float* work, lapack_int lwork,
lapack_int* iwork );
lapack_int LAPACKE_ssyconv( int matrix_order, char uplo, char way, lapack_int n,
float* a, lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_ssyconv_work( int matrix_order, char uplo, char way,
lapack_int n, float* a, lapack_int lda,
const lapack_int* ipiv, float* work );
lapack_int LAPACKE_ssyswapr( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int i1, lapack_int i2 );
lapack_int LAPACKE_ssyswapr_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int i1, lapack_int i2 );
lapack_int LAPACKE_ssytri2( int matrix_order, char uplo, lapack_int n, float* a,
lapack_int lda, const lapack_int* ipiv );
lapack_int LAPACKE_ssytri2_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda,
const lapack_int* ipiv,
                                 float* work, lapack_int lwork );
lapack_int LAPACKE_ssytri2x( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda, const lapack_int* ipiv,
lapack_int nb );
lapack_int LAPACKE_ssytri2x_work( int matrix_order, char uplo, lapack_int n,
float* a, lapack_int lda,
const lapack_int* ipiv, float* work,
lapack_int nb );
lapack_int LAPACKE_ssytrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a, lapack_int lda,
const lapack_int* ipiv, float* b, lapack_int ldb );
lapack_int LAPACKE_ssytrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const float* a,
lapack_int lda, const lapack_int* ipiv,
float* b, lapack_int ldb, float* work );
lapack_int LAPACKE_zbbcsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, lapack_int m,
lapack_int p, lapack_int q, double* theta,
double* phi, lapack_complex_double* u1,
lapack_int ldu1, lapack_complex_double* u2,
lapack_int ldu2, lapack_complex_double* v1t,
lapack_int ldv1t, lapack_complex_double* v2t,
lapack_int ldv2t, double* b11d, double* b11e,
double* b12d, double* b12e, double* b21d,
double* b21e, double* b22d, double* b22e );
lapack_int LAPACKE_zbbcsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
lapack_int m, lapack_int p, lapack_int q,
double* theta, double* phi,
lapack_complex_double* u1, lapack_int ldu1,
lapack_complex_double* u2, lapack_int ldu2,
lapack_complex_double* v1t, lapack_int ldv1t,
lapack_complex_double* v2t, lapack_int ldv2t,
double* b11d, double* b11e, double* b12d,
double* b12e, double* b21d, double* b21e,
double* b22d, double* b22e, double* rwork,
lapack_int lrwork );
lapack_int LAPACKE_zheswapr( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_zheswapr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_zhetri2( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zhetri2_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_zhetri2x( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, lapack_int nb );
lapack_int LAPACKE_zhetri2x_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int nb );
lapack_int LAPACKE_zhetrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_zhetrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work );
lapack_int LAPACKE_zsyconv( int matrix_order, char uplo, char way, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zsyconv_work( int matrix_order, char uplo, char way,
lapack_int n, lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* work );
lapack_int LAPACKE_zsyswapr( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_zsyswapr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int i1,
lapack_int i2 );
lapack_int LAPACKE_zsytri2( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv );
lapack_int LAPACKE_zsytri2_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_zsytri2x( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv, lapack_int nb );
lapack_int LAPACKE_zsytri2x_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int nb );
lapack_int LAPACKE_zsytrs2( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_zsytrs2_work( int matrix_order, char uplo, lapack_int n,
lapack_int nrhs, const lapack_complex_double* a,
lapack_int lda, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work );
lapack_int LAPACKE_zunbdb( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_double* x11, lapack_int ldx11,
lapack_complex_double* x12, lapack_int ldx12,
lapack_complex_double* x21, lapack_int ldx21,
lapack_complex_double* x22, lapack_int ldx22,
double* theta, double* phi,
lapack_complex_double* taup1,
lapack_complex_double* taup2,
lapack_complex_double* tauq1,
lapack_complex_double* tauq2 );
lapack_int LAPACKE_zunbdb_work( int matrix_order, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_double* x11, lapack_int ldx11,
lapack_complex_double* x12, lapack_int ldx12,
lapack_complex_double* x21, lapack_int ldx21,
lapack_complex_double* x22, lapack_int ldx22,
double* theta, double* phi,
lapack_complex_double* taup1,
lapack_complex_double* taup2,
lapack_complex_double* tauq1,
lapack_complex_double* tauq2,
lapack_complex_double* work, lapack_int lwork );
lapack_int LAPACKE_zuncsd( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans, char signs,
lapack_int m, lapack_int p, lapack_int q,
lapack_complex_double* x11, lapack_int ldx11,
lapack_complex_double* x12, lapack_int ldx12,
lapack_complex_double* x21, lapack_int ldx21,
lapack_complex_double* x22, lapack_int ldx22,
double* theta, lapack_complex_double* u1,
lapack_int ldu1, lapack_complex_double* u2,
lapack_int ldu2, lapack_complex_double* v1t,
lapack_int ldv1t, lapack_complex_double* v2t,
lapack_int ldv2t );
lapack_int LAPACKE_zuncsd_work( int matrix_order, char jobu1, char jobu2,
char jobv1t, char jobv2t, char trans,
char signs, lapack_int m, lapack_int p,
lapack_int q, lapack_complex_double* x11,
lapack_int ldx11, lapack_complex_double* x12,
lapack_int ldx12, lapack_complex_double* x21,
lapack_int ldx21, lapack_complex_double* x22,
lapack_int ldx22, double* theta,
lapack_complex_double* u1, lapack_int ldu1,
lapack_complex_double* u2, lapack_int ldu2,
lapack_complex_double* v1t, lapack_int ldv1t,
lapack_complex_double* v2t, lapack_int ldv2t,
lapack_complex_double* work, lapack_int lwork,
double* rwork, lapack_int lrwork,
lapack_int* iwork );
//LAPACK 3.4.0
lapack_int LAPACKE_sgemqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const float* v, lapack_int ldv,
const float* t, lapack_int ldt, float* c,
lapack_int ldc );
lapack_int LAPACKE_dgemqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const double* v, lapack_int ldv,
const double* t, lapack_int ldt, double* c,
lapack_int ldc );
lapack_int LAPACKE_cgemqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const lapack_complex_float* v,
lapack_int ldv, const lapack_complex_float* t,
lapack_int ldt, lapack_complex_float* c,
lapack_int ldc );
lapack_int LAPACKE_zgemqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const lapack_complex_double* v,
lapack_int ldv, const lapack_complex_double* t,
lapack_int ldt, lapack_complex_double* c,
lapack_int ldc );
lapack_int LAPACKE_sgeqrt( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, float* a, lapack_int lda, float* t,
lapack_int ldt );
lapack_int LAPACKE_dgeqrt( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, double* a, lapack_int lda, double* t,
lapack_int ldt );
lapack_int LAPACKE_cgeqrt( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* t,
lapack_int ldt );
lapack_int LAPACKE_zgeqrt( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* t,
lapack_int ldt );
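/* Usage sketch (illustrative comment, not part of the declared API): blocked
 * QR of a 3x2 matrix with LAPACKE_dgeqrt (block size nb == min(m,n) == 2),
 * then c := Q^T * c via LAPACKE_dgemqrt, reusing the same a and t.
 *
 *     double a[6] = { 1.0, 2.0,
 *                     3.0, 4.0,
 *                     5.0, 6.0 };             // 3x2, row-major
 *     double t[4];                            // nb x min(m,n)
 *     double c[3] = { 1.0, 1.0, 1.0 };
 *     lapack_int info = LAPACKE_dgeqrt( LAPACK_ROW_MAJOR, 3, 2, 2, a, 2, t, 2 );
 *     if( info == 0 )
 *         info = LAPACKE_dgemqrt( LAPACK_ROW_MAJOR, 'L', 'T', 3, 1, 2, 2,
 *                                 a, 2, t, 2, c, 1 );
 */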
lapack_int LAPACKE_sgeqrt2( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* t,
lapack_int ldt );
lapack_int LAPACKE_dgeqrt2( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* t,
lapack_int ldt );
lapack_int LAPACKE_cgeqrt2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zgeqrt2( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_sgeqrt3( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* t,
lapack_int ldt );
lapack_int LAPACKE_dgeqrt3( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* t,
lapack_int ldt );
lapack_int LAPACKE_cgeqrt3( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zgeqrt3( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_stpmqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb, const float* v,
lapack_int ldv, const float* t, lapack_int ldt,
float* a, lapack_int lda, float* b,
lapack_int ldb );
lapack_int LAPACKE_dtpmqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb, const double* v,
lapack_int ldv, const double* t, lapack_int ldt,
double* a, lapack_int lda, double* b,
lapack_int ldb );
lapack_int LAPACKE_ctpmqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_ztpmqrt( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb );
lapack_int LAPACKE_stpqrt( int matrix_order, lapack_int m, lapack_int n,
                           lapack_int l, lapack_int nb, float* a,
                           lapack_int lda, float* b, lapack_int ldb, float* t,
                           lapack_int ldt );
lapack_int LAPACKE_dtpqrt( int matrix_order, lapack_int m, lapack_int n,
                           lapack_int l, lapack_int nb, double* a,
                           lapack_int lda, double* b, lapack_int ldb, double* t,
                           lapack_int ldt );
lapack_int LAPACKE_ctpqrt( int matrix_order, lapack_int m, lapack_int n,
                           lapack_int l, lapack_int nb, lapack_complex_float* a,
                           lapack_int lda, lapack_complex_float* b,
                           lapack_int ldb, lapack_complex_float* t,
                           lapack_int ldt );
lapack_int LAPACKE_ztpqrt( int matrix_order, lapack_int m, lapack_int n,
lapack_int l, lapack_int nb,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_stpqrt2( int matrix_order, lapack_int m, lapack_int n,
                            lapack_int l, float* a, lapack_int lda, float* b,
                            lapack_int ldb, float* t, lapack_int ldt );
lapack_int LAPACKE_dtpqrt2( int matrix_order, lapack_int m, lapack_int n,
                            lapack_int l, double* a, lapack_int lda, double* b,
                            lapack_int ldb, double* t, lapack_int ldt );
lapack_int LAPACKE_ctpqrt2( int matrix_order, lapack_int m, lapack_int n,
                            lapack_int l, lapack_complex_float* a,
                            lapack_int lda, lapack_complex_float* b,
                            lapack_int ldb, lapack_complex_float* t,
                            lapack_int ldt );
lapack_int LAPACKE_ztpqrt2( int matrix_order, lapack_int m, lapack_int n,
                            lapack_int l, lapack_complex_double* a,
                            lapack_int lda, lapack_complex_double* b,
                            lapack_int ldb, lapack_complex_double* t,
                            lapack_int ldt );
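/* Usage sketch (illustrative comment, not part of the declared API): QR of a
 * triangular-pentagonal pair [A; B] with LAPACKE_dtpqrt.  With l == 0 the
 * pentagonal block B is fully dense; R overwrites A and the reflectors
 * overwrite B.
 *
 *     double a[4] = { 6.0, 1.0,
 *                     0.0, 5.0 };             // n x n upper triangular
 *     double b[4] = { 1.0, 2.0,
 *                     3.0, 4.0 };             // m x n pentagonal (l == 0)
 *     double t[4];                            // nb x n
 *     lapack_int info = LAPACKE_dtpqrt( LAPACK_ROW_MAJOR, 2, 2, 0, 2,
 *                                       a, 2, b, 2, t, 2 );
 */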
lapack_int LAPACKE_stprfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, lapack_int l, const float* v,
lapack_int ldv, const float* t, lapack_int ldt,
float* a, lapack_int lda, float* b, lapack_int ldb,
                           lapack_int ldwork );
lapack_int LAPACKE_dtprfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, lapack_int l, const double* v,
lapack_int ldv, const double* t, lapack_int ldt,
double* a, lapack_int lda, double* b, lapack_int ldb,
                           lapack_int ldwork );
lapack_int LAPACKE_ctprfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, lapack_int l,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
                           lapack_int ldwork );
lapack_int LAPACKE_ztprfb( int matrix_order, char side, char trans, char direct,
char storev, lapack_int m, lapack_int n,
lapack_int k, lapack_int l,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
                           lapack_int ldwork );
lapack_int LAPACKE_sgemqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const float* v, lapack_int ldv,
const float* t, lapack_int ldt, float* c,
lapack_int ldc, float* work );
lapack_int LAPACKE_dgemqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const double* v, lapack_int ldv,
const double* t, lapack_int ldt, double* c,
lapack_int ldc, double* work );
lapack_int LAPACKE_cgemqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const lapack_complex_float* v,
lapack_int ldv, const lapack_complex_float* t,
lapack_int ldt, lapack_complex_float* c,
lapack_int ldc, lapack_complex_float* work );
lapack_int LAPACKE_zgemqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int nb, const lapack_complex_double* v,
lapack_int ldv, const lapack_complex_double* t,
lapack_int ldt, lapack_complex_double* c,
lapack_int ldc, lapack_complex_double* work );
lapack_int LAPACKE_sgeqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, float* a, lapack_int lda,
float* t, lapack_int ldt, float* work );
lapack_int LAPACKE_dgeqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, double* a, lapack_int lda,
double* t, lapack_int ldt, double* work );
lapack_int LAPACKE_cgeqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, lapack_complex_float* a,
lapack_int lda, lapack_complex_float* t,
lapack_int ldt, lapack_complex_float* work );
lapack_int LAPACKE_zgeqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int nb, lapack_complex_double* a,
lapack_int lda, lapack_complex_double* t,
lapack_int ldt, lapack_complex_double* work );
lapack_int LAPACKE_sgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* t,
lapack_int ldt );
lapack_int LAPACKE_dgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* t,
lapack_int ldt );
lapack_int LAPACKE_cgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* t, lapack_int ldt );
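/*
 * The geqrt2 routines above compute an unblocked QR factorization with the
 * compact WY representation: on exit A holds R in its upper triangle and the
 * Householder vectors below it, while T receives the n-by-n triangular
 * factor of the block reflector. They take no workspace even in the _work
 * form. A hedged sketch for the double-precision variant (column-major,
 * ldt >= n per the underlying DGEQRT2 documentation):
 *
 *   lapack_int m = 3, n = 2, lda = 3, ldt = 2, info;
 *   double a[6] = { 1.0, 2.0, 3.0,     // column 0
 *                   4.0, 5.0, 6.0 };   // column 1
 *   double t[4];                       // n-by-n triangular factor
 *   info = LAPACKE_dgeqrt2_work( LAPACK_COL_MAJOR, m, n, a, lda, t, ldt );
 */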
lapack_int LAPACKE_sgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* t,
lapack_int ldt );
lapack_int LAPACKE_dgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* t,
lapack_int ldt );
lapack_int LAPACKE_cgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_zgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_stpmqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb, const float* v,
lapack_int ldv, const float* t, lapack_int ldt,
float* a, lapack_int lda, float* b,
lapack_int ldb, float* work );
lapack_int LAPACKE_dtpmqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb, const double* v,
lapack_int ldv, const double* t,
lapack_int ldt, double* a, lapack_int lda,
double* b, lapack_int ldb, double* work );
lapack_int LAPACKE_ctpmqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* work );
lapack_int LAPACKE_ztpmqrt_work( int matrix_order, char side, char trans,
lapack_int m, lapack_int n, lapack_int k,
lapack_int l, lapack_int nb,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* work );
lapack_int LAPACKE_dtpqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int l, lapack_int nb, double* a,
lapack_int lda, double* b, lapack_int ldb,
double* t, lapack_int ldt, double* work );
lapack_int LAPACKE_ctpqrt_work( int matrix_order, lapack_int m, lapack_int n,
                                lapack_int l, lapack_int nb,
                                lapack_complex_float* a, lapack_int lda,
                                lapack_complex_float* b, lapack_int ldb,
                                lapack_complex_float* t, lapack_int ldt,
                                lapack_complex_float* work );
lapack_int LAPACKE_ztpqrt_work( int matrix_order, lapack_int m, lapack_int n,
lapack_int l, lapack_int nb,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* work );
lapack_int LAPACKE_stpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
float* a, lapack_int lda, float* b,
lapack_int ldb, float* t, lapack_int ldt );
lapack_int LAPACKE_dtpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
double* a, lapack_int lda, double* b,
lapack_int ldb, double* t, lapack_int ldt );
lapack_int LAPACKE_ctpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
lapack_complex_float* t, lapack_int ldt );
lapack_int LAPACKE_ztpqrt2_work( int matrix_order, lapack_int m, lapack_int n,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
lapack_complex_double* t, lapack_int ldt );
lapack_int LAPACKE_stprfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, lapack_int l,
const float* v, lapack_int ldv, const float* t,
lapack_int ldt, float* a, lapack_int lda,
float* b, lapack_int ldb, const float* mywork,
lapack_int myldwork );
lapack_int LAPACKE_dtprfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, lapack_int l,
const double* v, lapack_int ldv,
const double* t, lapack_int ldt, double* a,
lapack_int lda, double* b, lapack_int ldb,
const double* mywork, lapack_int myldwork );
lapack_int LAPACKE_ctprfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, lapack_int l,
const lapack_complex_float* v, lapack_int ldv,
const lapack_complex_float* t, lapack_int ldt,
lapack_complex_float* a, lapack_int lda,
lapack_complex_float* b, lapack_int ldb,
const float* mywork, lapack_int myldwork );
lapack_int LAPACKE_ztprfb_work( int matrix_order, char side, char trans,
char direct, char storev, lapack_int m,
lapack_int n, lapack_int k, lapack_int l,
const lapack_complex_double* v, lapack_int ldv,
const lapack_complex_double* t, lapack_int ldt,
lapack_complex_double* a, lapack_int lda,
lapack_complex_double* b, lapack_int ldb,
const double* mywork, lapack_int myldwork );
// LAPACK 3.X.X
lapack_int LAPACKE_csyr( int matrix_order, char uplo, lapack_int n,
lapack_complex_float alpha,
const lapack_complex_float* x, lapack_int incx,
lapack_complex_float* a, lapack_int lda );
lapack_int LAPACKE_zsyr( int matrix_order, char uplo, lapack_int n,
lapack_complex_double alpha,
const lapack_complex_double* x, lapack_int incx,
lapack_complex_double* a, lapack_int lda );
lapack_int LAPACKE_csyr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_float alpha,
const lapack_complex_float* x,
lapack_int incx, lapack_complex_float* a,
lapack_int lda );
lapack_int LAPACKE_zsyr_work( int matrix_order, char uplo, lapack_int n,
lapack_complex_double alpha,
const lapack_complex_double* x,
lapack_int incx, lapack_complex_double* a,
lapack_int lda );
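/*
 * csyr/zsyr perform the complex *symmetric* (not Hermitian) rank-1 update
 * a := alpha*x*x**T + a, an operation absent from the reference BLAS, which
 * is why it is exposed through LAPACKE. A hedged usage sketch:
 *
 *   lapack_int n = 2, incx = 1, lda = 2, info;
 *   lapack_complex_double alpha = lapack_make_complex_double( 1.0, 0.0 );
 *   lapack_complex_double x[2], a[4];
 *   x[0] = lapack_make_complex_double( 1.0,  2.0 );
 *   x[1] = lapack_make_complex_double( 0.5, -1.0 );
 *   // ... initialize a; only its 'U' triangle is referenced and updated ...
 *   info = LAPACKE_zsyr( LAPACK_COL_MAJOR, 'U', n, alpha, x, incx, a, lda );
 */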
#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF)
#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF)
#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF)
#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF)
#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF)
#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF)
#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF)
#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF)
#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF)
#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF)
#define LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF)
#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF)
#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF)
#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF)
#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF)
#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF)
#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF)
#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF)
#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF)
#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF)
#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF)
#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF)
#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF)
#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF)
#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF)
#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF)
#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF)
#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF)
#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF)
#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF)
#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF)
#define LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF)
#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF)
#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF)
#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF)
#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF)
#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF)
#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF)
#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF)
#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF)
#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF)
#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF)
#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF)
#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF)
#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF)
#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF)
#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF)
#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF)
#define LAPACK_sgetrs LAPACK_GLOBAL(sgetrs,SGETRS)
#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS)
#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS)
#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS)
#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS)
#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS)
#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS)
#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS)
#define LAPACK_sgttrs LAPACK_GLOBAL(sgttrs,SGTTRS)
#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS)
#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS)
#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS)
#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS)
#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS)
#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS)
#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS)
#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS)
#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS)
#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS)
#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS)
#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS)
#define LAPACK_dpptrs LAPACK_GLOBAL(dpptrs,DPPTRS)
#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS)
#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS)
#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS)
#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS)
#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS)
#define LAPACK_zpbtrs LAPACK_GLOBAL(zpbtrs,ZPBTRS)
#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS)
#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS)
#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS)
#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS)
#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS)
#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS)
#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS)
#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS)
#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS)
#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS)
#define LAPACK_ssptrs LAPACK_GLOBAL(ssptrs,SSPTRS)
#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS)
#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS)
#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS)
#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS)
#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS)
#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS)
#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS)
#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS)
#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS)
#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS)
#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS)
#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS)
#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS)
#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS)
#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS)
#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS)
#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS)
#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON)
#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON)
#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON)
#define LAPACK_zgecon LAPACK_GLOBAL(zgecon,ZGECON)
#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON)
#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON)
#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON)
#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON)
#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON)
#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON)
#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON)
#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON)
#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON)
#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON)
#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON)
#define LAPACK_zpocon LAPACK_GLOBAL(zpocon,ZPOCON)
#define LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON)
#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON)
#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON)
#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON)
#define LAPACK_spbcon LAPACK_GLOBAL(spbcon,SPBCON)
#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON)
#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON)
#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON)
#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON)
#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON)
#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON)
#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON)
#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON)
#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON)
#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON)
#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON)
#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON)
#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON)
#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON)
#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON)
#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON)
#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON)
#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON)
#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON)
#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON)
#define LAPACK_dtrcon LAPACK_GLOBAL(dtrcon,DTRCON)
#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON)
#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON)
#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON)
#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON)
#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON)
#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON)
#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON)
#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON)
#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON)
#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON)
#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS)
#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS)
#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS)
#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS)
#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX)
#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX)
#define LAPACK_zgerfsx LAPACK_GLOBAL(zgerfsx,ZGERFSX)
#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX)
#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS)
#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS)
#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS)
#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS)
#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX)
#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX)
#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX)
#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX)
#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS)
#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS)
#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS)
#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS)
#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS)
#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS)
#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS)
#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS)
#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX)
#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX)
#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX)
#define LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX)
#define LAPACK_spprfs LAPACK_GLOBAL(spprfs,SPPRFS)
#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS)
#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS)
#define LAPACK_zpprfs LAPACK_GLOBAL(zpprfs,ZPPRFS)
#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS)
#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS)
#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS)
#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS)
#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS)
#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS)
#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS)
#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS)
#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS)
#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS)
#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS)
#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS)
#define LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX)
#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX)
#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX)
#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX)
#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS)
#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS)
#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX)
#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX)
#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS)
#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS)
#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS)
#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS)
#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS)
#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS)
#define LAPACK_strrfs LAPACK_GLOBAL(strrfs,STRRFS)
#define LAPACK_dtrrfs LAPACK_GLOBAL(dtrrfs,DTRRFS)
#define LAPACK_ctrrfs LAPACK_GLOBAL(ctrrfs,CTRRFS)
#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS)
#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS)
#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS)
#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS)
#define LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS)
#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS)
#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS)
#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS)
#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS)
#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI)
#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI)
#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI)
#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI)
#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI)
#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI)
#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI)
#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI)
#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI)
#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI)
#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI)
#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI)
#define LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI)
#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI)
#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI)
#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI)
#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI)
#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI)
#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI)
#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI)
#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI)
#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI)
#define LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI)
#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI)
#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI)
#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI)
#define LAPACK_chptri LAPACK_GLOBAL(chptri,CHPTRI)
#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI)
#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI)
#define LAPACK_dtrtri LAPACK_GLOBAL(dtrtri,DTRTRI)
#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI)
#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI)
#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI)
#define LAPACK_stftri LAPACK_GLOBAL(stftri,STFTRI)
#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI)
#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI)
#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI)
#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI)
#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI)
#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI)
#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU)
#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU)
#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU)
#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU)
#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB)
#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB)
#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB)
#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB)
#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU)
#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU)
#define LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU)
#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU)
#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB)
#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB)
#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB)
#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB)
#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU)
#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU)
#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU)
#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU)
#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB)
#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB)
#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB)
#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB)
#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU)
#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU)
#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU)
#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU)
#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU)
#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU)
#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU)
#define LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU)
#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB)
#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB)
#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB)
#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB)
#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB)
#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB)
#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV)
#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV)
#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV)
#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV)
#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV)
#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV)
#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX)
#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX)
#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX)
#define LAPACK_zgesvx LAPACK_GLOBAL(zgesvx,ZGESVX)
#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX)
#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX)
#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX)
#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX)
#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV)
#define LAPACK_dgbsv LAPACK_GLOBAL(dgbsv,DGBSV)
#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV)
#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV)
#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX)
#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX)
#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX)
#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX)
#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX)
#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX)
#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX)
#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX)
#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV)
#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV)
#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV)
#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV)
#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX)
#define LAPACK_dgtsvx LAPACK_GLOBAL(dgtsvx,DGTSVX)
#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX)
#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX)
#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV)
#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV)
#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV)
#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV)
#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV)
#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV)
#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX)
#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX)
#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX)
#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX)
#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX)
#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX)
#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX)
#define LAPACK_cposvxx LAPACK_GLOBAL(cposvxx,CPOSVXX)
#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV)
#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV)
#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV)
#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV)
#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX)
#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX)
#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX)
#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX)
#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV)
#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV)
#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV)
#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV)
#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX)
#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX)
#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX)
#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX)
#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV)
#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV)
#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV)
#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV)
#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX)
#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX)
#define LAPACK_cptsvx LAPACK_GLOBAL(cptsvx,CPTSVX)
#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX)
#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV)
#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV)
#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV)
#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV)
#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX)
#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX)
#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX)
#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX)
#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX)
#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX)
#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX)
#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX)
#define LAPACK_chesv LAPACK_GLOBAL(chesv,CHESV)
#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV)
#define LAPACK_chesvx LAPACK_GLOBAL(chesvx,CHESVX)
#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX)
#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX)
#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX)
#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV)
#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV)
#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV)
#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV)
#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX)
#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX)
#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX)
#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX)
#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)
#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)
#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)
#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)
#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF)
#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF)
#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF)
#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF)
#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF)
#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF)
#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF)
#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF)
#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3)
#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3)
#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3)
#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3)
#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR)
#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR)
#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR)
#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR)
#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR)
#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR)
#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR)
#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR)
#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF)
#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF)
#define LAPACK_cgelqf LAPACK_GLOBAL(cgelqf,CGELQF)
#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF)
#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ)
#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ)
#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ)
#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ)
#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ)
#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ)
#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ)
#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ)
#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF)
#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF)
#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF)
#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF)
#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL)
#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL)
#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL)
#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL)
#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL)
#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL)
#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL)
#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL)
#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF)
#define LAPACK_dgerqf LAPACK_GLOBAL(dgerqf,DGERQF)
#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF)
#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF)
#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ)
#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ)
#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ)
#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ)
#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ)
#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ)
#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ)
#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ)
#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF)
#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF)
#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF)
#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF)
#define LAPACK_sormrz LAPACK_GLOBAL(sormrz,SORMRZ)
#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ)
#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ)
#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ)
#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF)
#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF)
#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF)
#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF)
#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF)
#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF)
#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF)
#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF)
#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD)
#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD)
#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD)
#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD)
#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD)
#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD)
#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD)
#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD)
#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)
#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)
#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR)
#define LAPACK_dormbr LAPACK_GLOBAL(dormbr,DORMBR)
#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR)
#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR)
#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR)
#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR)
#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR)
#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR)
#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR)
#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR)
#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC)
#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC)
#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD)
#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD)
#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR)
#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR)
#define LAPACK_sormtr LAPACK_GLOBAL(sormtr,SORMTR)
#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR)
#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD)
#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD)
#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR)
#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR)
#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR)
#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)
#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD)
#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD)
#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)
#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)
#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)
#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)
#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)
#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)
#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)
#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)
#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)
#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)
#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD)
#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD)
#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD)
#define LAPACK_zhbtrd LAPACK_GLOBAL(zhbtrd,ZHBTRD)
#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF)
#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF)
#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR)
#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR)
#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR)
#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR)
#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR)
#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR)
#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR)
#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR)
#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC)
#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC)
#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC)
#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC)
#define LAPACK_sstegr LAPACK_GLOBAL(sstegr,SSTEGR)
#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR)
#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR)
#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR)
#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR)
#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR)
#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR)
#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR)
#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ)
#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ)
#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN)
#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN)
#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN)
#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN)
#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA)
#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA)
#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST)
#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST)
#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST)
#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST)
#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST)
#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST)
#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)
#define LAPACK_zhpgst LAPACK_GLOBAL(zhpgst,ZHPGST)
#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST)
#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST)
#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST)
#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST)
#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)
#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)
#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)
#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)
#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD)
#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD)
#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD)
#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD)
#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)
#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)
#define LAPACK_sormhr LAPACK_GLOBAL(sormhr,SORMHR)
#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR)
#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR)
#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR)
#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR)
#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR)
#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL)
#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL)
#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL)
#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL)
#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK)
#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK)
#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK)
#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK)
#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)
#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)
#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)
#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)
#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)
#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)
#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)
#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)
#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)
#define LAPACK_dtrevc LAPACK_GLOBAL(dtrevc,DTREVC)
#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)
#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)
#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA)
#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA)
#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA)
#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA)
#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)
#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)
#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)
#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)
#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN)
#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN)
#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN)
#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN)
#define LAPACK_strsyl LAPACK_GLOBAL(strsyl,STRSYL)
#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL)
#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL)
#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL)
#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD)
#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD)
#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD)
#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD)
#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL)
#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL)
#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL)
#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL)
#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK)
#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK)
#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK)
#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK)
#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)
#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)
#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)
#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)
#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)
#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)
#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)
#define LAPACK_ztgevc LAPACK_GLOBAL(ztgevc,ZTGEVC)
#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)
#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)
#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)
#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)
#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)
#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)
#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)
#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)
#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)
#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)
#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)
#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)
#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)
#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)
#define LAPACK_ctgsna LAPACK_GLOBAL(ctgsna,CTGSNA)
#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)
#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP)
#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP)
#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP)
#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP)
#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)
#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)
#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)
#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)
#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS)
#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS)
#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS)
#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS)
#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY)
#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY)
#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY)
#define LAPACK_zgelsy LAPACK_GLOBAL(zgelsy,ZGELSY)
#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS)
#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS)
#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS)
#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS)
#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD)
#define LAPACK_dgelsd LAPACK_GLOBAL(dgelsd,DGELSD)
#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD)
#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD)
#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE)
#define LAPACK_dgglse LAPACK_GLOBAL(dgglse,DGGLSE)
#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE)
#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE)
#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM)
#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM)
#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM)
#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM)
#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV)
#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV)
#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV)
#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV)
#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD)
#define LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD)
#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD)
#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD)
#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX)
#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX)
#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX)
#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX)
#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR)
#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR)
#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR)
#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR)
#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV)
#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV)
#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)
#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)
#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD)
#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD)
#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)
#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)
#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX)
#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX)
#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)
#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)
#define LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)
#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)
#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV)
#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV)
#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)
#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)
#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD)
#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD)
#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX)
#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)
#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX)
#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX)
#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV)
#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV)
#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD)
#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD)
#define LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX)
#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX)
#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR)
#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR)
#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES)
#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES)
#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES)
#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES)
#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX)
#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX)
#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX)
#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX)
#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV)
#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV)
#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV)
#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV)
#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX)
#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX)
#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX)
#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX)
#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD)
#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD)
#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD)
#define LAPACK_zgesvd LAPACK_GLOBAL(zgesvd,ZGESVD)
#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD)
#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD)
#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD)
#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD)
#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV)
#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV)
#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ)
#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ)
#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD)
#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD)
#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD)
#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD)
#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV)
#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV)
#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV)
#define LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV)
#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD)
#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD)
#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD)
#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD)
#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX)
#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX)
#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX)
#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX)
#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV)
#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV)
#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)
#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)
#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD)
#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD)
#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)
#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)
#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX)
#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX)
#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)
#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)
#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV)
#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV)
#define LAPACK_chbgv LAPACK_GLOBAL(chbgv,CHBGV)
#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV)
#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD)
#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD)
#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD)
#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD)
#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX)
#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX)
#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX)
#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX)
#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES)
#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES)
#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES)
#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES)
#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX)
#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX)
#define LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX)
#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX)
#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV)
#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV)
#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV)
#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV)
#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX)
#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX)
#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX)
#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX)
#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK)
#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK)
#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)
#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)
#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)
#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)
#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)
#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)
#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)
#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)
#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)
#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)
#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)
#define LAPACK_stfttr LAPACK_GLOBAL(stfttr,STFTTR)
#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)
#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)
#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)
#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)
#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)
#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)
#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)
#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)
#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)
#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)
#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF)
#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF)
#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF)
#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF)
#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP)
#define LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP)
#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP)
#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP)
#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP)
#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP)
#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP)
#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP)
#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)
#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)
#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)
#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)
#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)
#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)
#define LAPACK_sgeqr2 LAPACK_GLOBAL(sgeqr2,SGEQR2)
#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2)
#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2)
#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2)
#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)
#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)
#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)
#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)
#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2)
#define LAPACK_dgetf2 LAPACK_GLOBAL(dgetf2,DGETF2)
#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2)
#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2)
#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)
#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)
#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)
#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)
#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)
#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)
#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)
#define LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)
#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)
#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)
#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)
#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)
#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)
#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)
#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)
#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)
#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)
#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)
#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)
#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)
#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2)
#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2)
#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2)
#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2)
#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)
#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)
#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)
#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)
#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)
#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)
#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)
#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)
#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)
#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)
#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)
#define LAPACK_zlarft LAPACK_GLOBAL(zlarft,ZLARFT)
#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)
#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)
#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)
#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)
#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)
#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)
#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)
#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)
#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)
#define LAPACK_dlag2s LAPACK_GLOBAL(dlag2s,DLAG2S)
#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)
#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)
#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)
#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)
#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)
#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)
#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)
#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)
#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)
#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)
#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)
#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)
#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)
#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)
#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)
#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)
#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)
#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)
#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)
#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)
#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)
#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)
#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)
#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)
#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)
#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)
#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)
#define LAPACK_dlapy2 LAPACK_GLOBAL(dlapy2,DLAPY2)
#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)
#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)
#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)
#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)
#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)
#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)
// LAPACK 3.3.0
#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD)
#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR)
#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2)
#define LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X)
#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)
#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV)
#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR)
#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)
#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)
#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)
#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB)
#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD)
#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD)
#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)
#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)
#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV)
#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR)
#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)
#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)
#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)
#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD)
#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)
#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)
#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV)
#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR)
#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)
#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)
#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)
#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD)
#define LAPACK_zheswapr LAPACK_GLOBAL(zheswapr,ZHESWAPR)
#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2)
#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X)
#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)
#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV)
#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR)
#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)
#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)
#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)
#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB)
#define LAPACK_zuncsd LAPACK_GLOBAL(zuncsd,ZUNCSD)
// LAPACK 3.4.0
#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT)
#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT)
#define LAPACK_cgemqrt LAPACK_GLOBAL(cgemqrt,CGEMQRT)
#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT)
#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT)
#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)
#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT)
#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT)
#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2)
#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2)
#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2)
#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2)
#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3)
#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3)
#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3)
#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3)
#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)
#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)
#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)
#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)
#define LAPACK_stpqrt LAPACK_GLOBAL(stpqrt,STPQRT)
#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)
#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)
#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)
#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)
#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)
#define LAPACK_ctpqrt2 LAPACK_GLOBAL(ctpqrt2,CTPQRT2)
#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)
#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)
#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)
#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)
#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)
// LAPACK 3.X.X
#define LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR)
#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR)
void LAPACK_sgetrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
lapack_int* ipiv, lapack_int *info );
void LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
lapack_int* ipiv, lapack_int *info );
void LAPACK_cgetrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* ipiv, lapack_int *info );
void LAPACK_zgetrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv, lapack_int *info );
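/* Usage sketch (illustrative, not part of the upstream header): LU-factor a
 * column-major general matrix in place with LAPACK_dgetrf. Every scalar is
 * passed by address because these raw bindings mirror the Fortran calling
 * convention; the matrix values below are made up for the example.
 *
 *   double a[4] = { 4.0, 6.0, 3.0, 3.0 };          // 2x2, column-major
 *   lapack_int m = 2, n = 2, lda = 2, ipiv[2], info;
 *   LAPACK_dgetrf( &m, &n, a, &lda, ipiv, &info );
 *   // info == 0: success; info > 0: U(info,info) is exactly zero
 */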
void LAPACK_sgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, float* ab, lapack_int* ldab,
lapack_int* ipiv, lapack_int *info );
void LAPACK_dgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, double* ab, lapack_int* ldab,
lapack_int* ipiv, lapack_int *info );
void LAPACK_cgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_complex_float* ab, lapack_int* ldab,
lapack_int* ipiv, lapack_int *info );
void LAPACK_zgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_complex_double* ab, lapack_int* ldab,
lapack_int* ipiv, lapack_int *info );
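/* Usage sketch (illustrative): banded LU with LAPACK_dgbtrf. The band is held
 * in LAPACK band storage with ldab >= 2*kl+ku+1; entry a(i,j) (0-based) lives
 * at ab[kl+ku+i-j + j*ldab], and rows 0..kl-1 of each column are left free
 * for pivoting fill-in. The tridiagonal values below are made up.
 *
 *   lapack_int m = 3, n = 3, kl = 1, ku = 1, ldab = 4, ipiv[3], info;
 *   double ab[12] = { 0,0,4,1,  0,1,4,1,  0,1,4,0 };
 *   LAPACK_dgbtrf( &m, &n, &kl, &ku, ab, &ldab, ipiv, &info );
 */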
void LAPACK_sgttrf( lapack_int* n, float* dl, float* d, float* du, float* du2,
lapack_int* ipiv, lapack_int *info );
void LAPACK_dgttrf( lapack_int* n, double* dl, double* d, double* du,
double* du2, lapack_int* ipiv, lapack_int *info );
void LAPACK_cgttrf( lapack_int* n, lapack_complex_float* dl,
lapack_complex_float* d, lapack_complex_float* du,
lapack_complex_float* du2, lapack_int* ipiv,
lapack_int *info );
void LAPACK_zgttrf( lapack_int* n, lapack_complex_double* dl,
lapack_complex_double* d, lapack_complex_double* du,
lapack_complex_double* du2, lapack_int* ipiv,
lapack_int *info );
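/* Usage sketch (illustrative): factor a general tridiagonal matrix with
 * LAPACK_dgttrf, passing the three diagonals as separate arrays; du2 receives
 * the second superdiagonal created by fill-in. Values are made up.
 *
 *   lapack_int n = 3, ipiv[3], info;
 *   double dl[2] = { 1.0, 1.0 };                   // subdiagonal, n-1 entries
 *   double d[3]  = { 4.0, 4.0, 4.0 };              // main diagonal
 *   double du[2] = { 1.0, 1.0 };                   // superdiagonal, n-1 entries
 *   double du2[1];                                 // fill-in, n-2 entries
 *   LAPACK_dgttrf( &n, dl, d, du, du2, ipiv, &info );
 */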
void LAPACK_spotrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dpotrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_cpotrf( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_zpotrf( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int *info );
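/* Usage sketch (illustrative): Cholesky-factor a symmetric positive definite
 * matrix in place with LAPACK_dpotrf, referencing only the lower triangle.
 *
 *   char uplo = 'L';
 *   lapack_int n = 2, lda = 2, info;
 *   double a[4] = { 4.0, 2.0, 2.0, 3.0 };          // 2x2 SPD, column-major
 *   LAPACK_dpotrf( &uplo, &n, a, &lda, &info );
 *   // info > 0: the leading minor of that order is not positive definite
 */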
void LAPACK_dpstrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
lapack_int* piv, lapack_int* rank, double* tol,
double* work, lapack_int *info );
void LAPACK_spstrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
lapack_int* piv, lapack_int* rank, float* tol, float* work,
lapack_int *info );
void LAPACK_zpstrf( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* piv, lapack_int* rank,
double* tol, double* work, lapack_int *info );
void LAPACK_cpstrf( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* piv, lapack_int* rank,
float* tol, float* work, lapack_int *info );
void LAPACK_dpftrf( char* transr, char* uplo, lapack_int* n, double* a,
lapack_int *info );
void LAPACK_spftrf( char* transr, char* uplo, lapack_int* n, float* a,
lapack_int *info );
void LAPACK_zpftrf( char* transr, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int *info );
void LAPACK_cpftrf( char* transr, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int *info );
void LAPACK_spptrf( char* uplo, lapack_int* n, float* ap, lapack_int *info );
void LAPACK_dpptrf( char* uplo, lapack_int* n, double* ap, lapack_int *info );
void LAPACK_cpptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
lapack_int *info );
void LAPACK_zpptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
lapack_int *info );
void LAPACK_spbtrf( char* uplo, lapack_int* n, lapack_int* kd, float* ab,
lapack_int* ldab, lapack_int *info );
void LAPACK_dpbtrf( char* uplo, lapack_int* n, lapack_int* kd, double* ab,
lapack_int* ldab, lapack_int *info );
void LAPACK_cpbtrf( char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_float* ab, lapack_int* ldab,
lapack_int *info );
void LAPACK_zpbtrf( char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_double* ab, lapack_int* ldab,
lapack_int *info );
void LAPACK_spttrf( lapack_int* n, float* d, float* e, lapack_int *info );
void LAPACK_dpttrf( lapack_int* n, double* d, double* e, lapack_int *info );
void LAPACK_cpttrf( lapack_int* n, float* d, lapack_complex_float* e,
lapack_int *info );
void LAPACK_zpttrf( lapack_int* n, double* d, lapack_complex_double* e,
lapack_int *info );
void LAPACK_ssytrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,
lapack_int* ipiv, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dsytrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,
lapack_int* ipiv, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_csytrf( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* ipiv,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zsytrf( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_chetrf( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* ipiv,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zhetrf( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
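/* Usage sketch (illustrative): Bunch-Kaufman factorization with the standard
 * two-phase workspace query; calling LAPACK_dsytrf with lwork = -1 first
 * returns the optimal workspace size in work[0]. Values are made up, and the
 * malloc/free pair assumes <stdlib.h> is included by the caller.
 *
 *   char uplo = 'U';
 *   lapack_int n = 2, lda = 2, ipiv[2], lwork = -1, info;
 *   double a[4] = { 2.0, 1.0, 1.0, 2.0 }, wkopt;
 *   LAPACK_dsytrf( &uplo, &n, a, &lda, ipiv, &wkopt, &lwork, &info );
 *   lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   LAPACK_dsytrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info );
 *   free( work );
 */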
void LAPACK_ssptrf( char* uplo, lapack_int* n, float* ap, lapack_int* ipiv,
lapack_int *info );
void LAPACK_dsptrf( char* uplo, lapack_int* n, double* ap, lapack_int* ipiv,
lapack_int *info );
void LAPACK_csptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
lapack_int* ipiv, lapack_int *info );
void LAPACK_zsptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
lapack_int* ipiv, lapack_int *info );
void LAPACK_chptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,
lapack_int* ipiv, lapack_int *info );
void LAPACK_zhptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,
lapack_int* ipiv, lapack_int *info );
void LAPACK_sgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, const lapack_int* ipiv,
float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_dgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const lapack_int* ipiv,
double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_cgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zgetrs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
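/* Usage sketch (illustrative): solve A x = b by pairing LAPACK_dgetrf with
 * LAPACK_dgetrs; b is overwritten with the solution (here (1, 2)).
 *
 *   char trans = 'N';
 *   lapack_int n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info;
 *   double a[4] = { 4.0, 6.0, 3.0, 3.0 }, b[2] = { 10.0, 12.0 };
 *   LAPACK_dgetrf( &n, &n, a, &lda, ipiv, &info );
 *   if( info == 0 )
 *       LAPACK_dgetrs( &trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, &info );
 */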
void LAPACK_sgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const float* ab, lapack_int* ldab,
const lapack_int* ipiv, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const double* ab, lapack_int* ldab,
const lapack_int* ipiv, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const lapack_complex_float* ab,
lapack_int* ldab, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const lapack_complex_double* ab,
lapack_int* ldab, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_sgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
const float* dl, const float* d, const float* du,
const float* du2, const lapack_int* ipiv, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
const double* dl, const double* d, const double* du,
const double* du2, const lapack_int* ipiv, double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_cgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zgttrs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_spotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
lapack_int* lda, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_cpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
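/* Usage sketch (illustrative): the SPD counterpart of the LU solve above,
 * pairing LAPACK_dpotrf with LAPACK_dpotrs; b is overwritten with the
 * solution (here (1, 1)).
 *
 *   char uplo = 'L';
 *   lapack_int n = 2, nrhs = 1, lda = 2, ldb = 2, info;
 *   double a[4] = { 4.0, 2.0, 2.0, 3.0 }, b[2] = { 6.0, 5.0 };
 *   LAPACK_dpotrf( &uplo, &n, a, &lda, &info );
 *   if( info == 0 )
 *       LAPACK_dpotrs( &uplo, &n, &nrhs, a, &lda, b, &ldb, &info );
 */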
void LAPACK_dpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_spftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
const float* a, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_cpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_spptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const float* ap, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* ap, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_spbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const float* ab, lapack_int* ldab, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const double* ab, lapack_int* ldab, double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_cpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_spttrs( lapack_int* n, lapack_int* nrhs, const float* d,
const float* e, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dpttrs( lapack_int* n, lapack_int* nrhs, const double* d,
const double* e, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,
const lapack_complex_float* e, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zpttrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* d, const lapack_complex_double* e,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_ssytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
lapack_int* lda, const lapack_int* ipiv, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const lapack_int* ipiv,
double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_csytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_chetrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zhetrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_ssptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const float* ap, const lapack_int* ipiv, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* ap, const lapack_int* ipiv, double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_csptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_chptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zhptrs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap, const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_strtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const float* a, lapack_int* lda, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dtrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const double* a, lapack_int* lda,
double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_ctrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_ztrtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
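/* Usage sketch (illustrative): back-substitution against an upper triangular
 * matrix with LAPACK_dtrtrs; no factorization step is needed, and only the
 * upper triangle of a is referenced. The solution here is (1.5, 2).
 *
 *   char uplo = 'U', trans = 'N', diag = 'N';
 *   lapack_int n = 2, nrhs = 1, lda = 2, ldb = 2, info;
 *   double a[4] = { 2.0, 0.0, 1.0, 3.0 }, b[2] = { 5.0, 6.0 };
 *   LAPACK_dtrtrs( &uplo, &trans, &diag, &n, &nrhs, a, &lda, b, &ldb, &info );
 *   // info > 0 reports a zero diagonal entry, i.e. a singular system
 */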
void LAPACK_stptrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const float* ap, float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_dtptrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const double* ap, double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_ctptrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* ap,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_ztptrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_double* ap,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_stbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs, const float* ab,
lapack_int* ldab, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dtbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs, const double* ab,
lapack_int* ldab, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_ctbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs,
const lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_ztbtrs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs,
const lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_sgecon( char* norm, lapack_int* n, const float* a, lapack_int* lda,
float* anorm, float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgecon( char* norm, lapack_int* n, const double* a, lapack_int* lda,
double* anorm, double* rcond, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_cgecon( char* norm, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* anorm, float* rcond,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zgecon( char* norm, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* anorm, double* rcond,
lapack_complex_double* work, double* rwork,
lapack_int *info );
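/* Usage sketch (illustrative): estimate the reciprocal 1-norm condition number
 * with LAPACK_dgecon. It expects the dgetrf factors plus the 1-norm of the
 * *original* matrix; the norm (max column sum) is computed by hand here so the
 * sketch stays self-contained. fabs assumes <math.h>; work is 4*n doubles.
 *
 *   double a[4] = { 4.0, 6.0, 3.0, 3.0 };
 *   double anorm = 0.0, rcond, work[8];
 *   lapack_int n = 2, lda = 2, ipiv[2], iwork[2], info;
 *   for( int j = 0; j < 2; ++j ) {
 *       double s = fabs( a[2*j] ) + fabs( a[2*j+1] );
 *       if( s > anorm ) anorm = s;
 *   }
 *   LAPACK_dgetrf( &n, &n, a, &lda, ipiv, &info );
 *   char norm = '1';
 *   LAPACK_dgecon( &norm, &n, a, &lda, &anorm, &rcond, work, iwork, &info );
 */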
void LAPACK_sgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
const float* ab, lapack_int* ldab, const lapack_int* ipiv,
float* anorm, float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
const double* ab, lapack_int* ldab, const lapack_int* ipiv,
double* anorm, double* rcond, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_cgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
const lapack_complex_float* ab, lapack_int* ldab,
const lapack_int* ipiv, float* anorm, float* rcond,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,
const lapack_complex_double* ab, lapack_int* ldab,
const lapack_int* ipiv, double* anorm, double* rcond,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sgtcon( char* norm, lapack_int* n, const float* dl, const float* d,
const float* du, const float* du2, const lapack_int* ipiv,
float* anorm, float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgtcon( char* norm, lapack_int* n, const double* dl,
const double* d, const double* du, const double* du2,
const lapack_int* ipiv, double* anorm, double* rcond,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cgtcon( char* norm, lapack_int* n, const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* du2, const lapack_int* ipiv,
float* anorm, float* rcond, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zgtcon( char* norm, lapack_int* n, const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* du2, const lapack_int* ipiv,
double* anorm, double* rcond, lapack_complex_double* work,
lapack_int *info );
void LAPACK_spocon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
float* anorm, float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dpocon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
double* anorm, double* rcond, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_cpocon( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* anorm, float* rcond,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zpocon( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* anorm, double* rcond,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sppcon( char* uplo, lapack_int* n, const float* ap, float* anorm,
float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dppcon( char* uplo, lapack_int* n, const double* ap, double* anorm,
double* rcond, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cppcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
float* anorm, float* rcond, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zppcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
double* anorm, double* rcond, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_spbcon( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,
lapack_int* ldab, float* anorm, float* rcond, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_dpbcon( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,
lapack_int* ldab, double* anorm, double* rcond,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cpbcon( char* uplo, lapack_int* n, lapack_int* kd,
const lapack_complex_float* ab, lapack_int* ldab,
float* anorm, float* rcond, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zpbcon( char* uplo, lapack_int* n, lapack_int* kd,
const lapack_complex_double* ab, lapack_int* ldab,
double* anorm, double* rcond, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_sptcon( lapack_int* n, const float* d, const float* e, float* anorm,
float* rcond, float* work, lapack_int *info );
void LAPACK_dptcon( lapack_int* n, const double* d, const double* e,
double* anorm, double* rcond, double* work,
lapack_int *info );
void LAPACK_cptcon( lapack_int* n, const float* d,
const lapack_complex_float* e, float* anorm, float* rcond,
float* work, lapack_int *info );
void LAPACK_zptcon( lapack_int* n, const double* d,
const lapack_complex_double* e, double* anorm,
double* rcond, double* work, lapack_int *info );
void LAPACK_ssycon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
const lapack_int* ipiv, float* anorm, float* rcond,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dsycon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
const lapack_int* ipiv, double* anorm, double* rcond,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_csycon( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv, float* anorm,
float* rcond, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zsycon( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, const lapack_int* ipiv, double* anorm,
double* rcond, lapack_complex_double* work,
lapack_int *info );
void LAPACK_checon( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv, float* anorm,
float* rcond, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zhecon( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, const lapack_int* ipiv, double* anorm,
double* rcond, lapack_complex_double* work,
lapack_int *info );
void LAPACK_sspcon( char* uplo, lapack_int* n, const float* ap,
const lapack_int* ipiv, float* anorm, float* rcond,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dspcon( char* uplo, lapack_int* n, const double* ap,
const lapack_int* ipiv, double* anorm, double* rcond,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cspcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
const lapack_int* ipiv, float* anorm, float* rcond,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zspcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
const lapack_int* ipiv, double* anorm, double* rcond,
lapack_complex_double* work, lapack_int *info );
void LAPACK_chpcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,
const lapack_int* ipiv, float* anorm, float* rcond,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zhpcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,
const lapack_int* ipiv, double* anorm, double* rcond,
lapack_complex_double* work, lapack_int *info );
void LAPACK_strcon( char* norm, char* uplo, char* diag, lapack_int* n,
const float* a, lapack_int* lda, float* rcond, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_dtrcon( char* norm, char* uplo, char* diag, lapack_int* n,
const double* a, lapack_int* lda, double* rcond,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_ctrcon( char* norm, char* uplo, char* diag, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda,
float* rcond, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztrcon( char* norm, char* uplo, char* diag, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda,
double* rcond, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_stpcon( char* norm, char* uplo, char* diag, lapack_int* n,
const float* ap, float* rcond, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_dtpcon( char* norm, char* uplo, char* diag, lapack_int* n,
const double* ap, double* rcond, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_ctpcon( char* norm, char* uplo, char* diag, lapack_int* n,
const lapack_complex_float* ap, float* rcond,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztpcon( char* norm, char* uplo, char* diag, lapack_int* n,
const lapack_complex_double* ap, double* rcond,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_stbcon( char* norm, char* uplo, char* diag, lapack_int* n,
lapack_int* kd, const float* ab, lapack_int* ldab,
float* rcond, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dtbcon( char* norm, char* uplo, char* diag, lapack_int* n,
lapack_int* kd, const double* ab, lapack_int* ldab,
double* rcond, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_ctbcon( char* norm, char* uplo, char* diag, lapack_int* n,
lapack_int* kd, const lapack_complex_float* ab,
lapack_int* ldab, float* rcond, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_ztbcon( char* norm, char* uplo, char* diag, lapack_int* n,
lapack_int* kd, const lapack_complex_double* ab,
lapack_int* ldab, double* rcond,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, const float* af,
lapack_int* ldaf, const lapack_int* ipiv, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,
float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const lapack_int* ipiv, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zgerfs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
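/* Usage sketch (illustrative): iterative refinement with LAPACK_dgerfs. It
 * needs both the original matrix a and its dgetrf factorization af, plus the
 * current solution x from dgetrs; ferr/berr return forward and backward error
 * bounds per right-hand side. work is 3*n doubles; values are made up.
 *
 *   char trans = 'N';
 *   lapack_int n = 2, nrhs = 1, lda = 2, ldaf = 2, ldb = 2, ldx = 2;
 *   lapack_int ipiv[2], iwork[2], info;
 *   double a[4]  = { 4.0, 6.0, 3.0, 3.0 };
 *   double af[4] = { 4.0, 6.0, 3.0, 3.0 };         // copy to be factored
 *   double b[2]  = { 10.0, 12.0 }, x[2] = { 10.0, 12.0 };
 *   double ferr, berr, work[6];
 *   LAPACK_dgetrf( &n, &n, af, &ldaf, ipiv, &info );
 *   LAPACK_dgetrs( &trans, &n, &nrhs, af, &ldaf, ipiv, x, &ldx, &info );
 *   LAPACK_dgerfs( &trans, &n, &nrhs, a, &lda, af, &ldaf, ipiv,
 *                  b, &ldb, x, &ldx, &ferr, &berr, work, iwork, &info );
 */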
void LAPACK_dgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const lapack_int* ipiv, const double* r,
const double* c, const double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* rcond, double* berr,
lapack_int* n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int* nparams, double* params,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_sgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, const float* af,
lapack_int* ldaf, const lapack_int* ipiv, const float* r,
const float* c, const float* b, lapack_int* ldb, float* x,
lapack_int* ldx, float* rcond, float* berr,
lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_zgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const double* r, const double* c,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const float* r, const float* c,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const float* ab, lapack_int* ldab,
const float* afb, lapack_int* ldafb, const lapack_int* ipiv,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const double* ab, lapack_int* ldab,
const double* afb, lapack_int* ldafb,
const lapack_int* ipiv, const double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* ferr, double* berr,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const lapack_complex_float* ab,
lapack_int* ldab, const lapack_complex_float* afb,
lapack_int* ldafb, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, const lapack_complex_double* ab,
lapack_int* ldab, const lapack_complex_double* afb,
lapack_int* ldafb, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_dgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, const double* ab,
lapack_int* ldab, const double* afb, lapack_int* ldafb,
const lapack_int* ipiv, const double* r, const double* c,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* rcond, double* berr,
lapack_int* n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int* nparams, double* params,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_sgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, const float* ab,
lapack_int* ldab, const float* afb, lapack_int* ldafb,
const lapack_int* ipiv, const float* r, const float* c,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* rcond, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_zgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs,
const lapack_complex_double* ab, lapack_int* ldab,
const lapack_complex_double* afb, lapack_int* ldafb,
const lapack_int* ipiv, const double* r, const double* c,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs,
const lapack_complex_float* ab, lapack_int* ldab,
const lapack_complex_float* afb, lapack_int* ldafb,
const lapack_int* ipiv, const float* r, const float* c,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,
const float* dl, const float* d, const float* du,
const float* dlf, const float* df, const float* duf,
const float* du2, const lapack_int* ipiv, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,
float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,
const double* dl, const double* d, const double* du,
const double* dlf, const double* df, const double* duf,
const double* du2, const lapack_int* ipiv, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du,
const lapack_complex_float* dlf,
const lapack_complex_float* df,
const lapack_complex_float* duf,
const lapack_complex_float* du2, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du,
const lapack_complex_double* dlf,
const lapack_complex_double* df,
const lapack_complex_double* duf,
const lapack_complex_double* du2, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
lapack_int* lda, const float* af, lapack_int* ldaf,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dporfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* ferr, double* berr,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cporfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zporfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_dporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const double* s, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_sporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, const float* af,
lapack_int* ldaf, const float* s, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_zporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const double* s, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const float* s, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_spprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const float* ap, const float* afp, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,
float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* ap, const double* afp, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap,
const lapack_complex_float* afp,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_spbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const float* ab, lapack_int* ldab, const float* afb,
lapack_int* ldafb, const float* b, lapack_int* ldb,
float* x, lapack_int* ldx, float* ferr, float* berr,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const double* ab, lapack_int* ldab, const double* afb,
lapack_int* ldafb, const double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* ferr, double* berr,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const lapack_complex_float* ab, lapack_int* ldab,
const lapack_complex_float* afb, lapack_int* ldafb,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
const lapack_complex_double* ab, lapack_int* ldab,
const lapack_complex_double* afb, lapack_int* ldafb,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sptrfs( lapack_int* n, lapack_int* nrhs, const float* d,
const float* e, const float* df, const float* ef,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* ferr, float* berr, float* work, lapack_int *info );
void LAPACK_dptrfs( lapack_int* n, lapack_int* nrhs, const double* d,
const double* e, const double* df, const double* ef,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* ferr, double* berr, double* work,
lapack_int *info );
void LAPACK_cptrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,
const lapack_complex_float* e, const float* df,
const lapack_complex_float* ef,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zptrfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* d, const lapack_complex_double* e,
const double* df, const lapack_complex_double* ef,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_ssyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,
lapack_int* lda, const float* af, lapack_int* ldaf,
const lapack_int* ipiv, const float* b, lapack_int* ldb,
float* x, lapack_int* ldx, float* ferr, float* berr,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const lapack_int* ipiv, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_csyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_dsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, const double* af,
lapack_int* ldaf, const lapack_int* ipiv, const double* s,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* rcond, double* berr,
lapack_int* n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int* nparams, double* params,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_ssyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, const float* af,
lapack_int* ldaf, const lapack_int* ipiv, const float* s,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* rcond, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_zsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const double* s,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_csyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const float* s,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_cherfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zherfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_zherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* af, lapack_int* ldaf,
const lapack_int* ipiv, const double* s,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* af, lapack_int* ldaf,
const lapack_int* ipiv, const float* s,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ssprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const float* ap, const float* afp, const lapack_int* ipiv,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const double* ap, const double* afp, const lapack_int* ipiv,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* ferr, double* berr, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_csprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap,
const lapack_complex_float* afp, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_chprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap,
const lapack_complex_float* afp, const lapack_int* ipiv,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zhprfs( char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap,
const lapack_complex_double* afp, const lapack_int* ipiv,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* ferr,
double* berr, lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_strrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const float* a, lapack_int* lda,
const float* b, lapack_int* ldb, const float* x,
lapack_int* ldx, float* ferr, float* berr, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_dtrrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const double* a, lapack_int* lda,
const double* b, lapack_int* ldb, const double* x,
lapack_int* ldx, double* ferr, double* berr, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_ctrrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* b,
lapack_int* ldb, const lapack_complex_float* x,
lapack_int* ldx, float* ferr, float* berr,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztrrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* b,
lapack_int* ldb, const lapack_complex_double* x,
lapack_int* ldx, double* ferr, double* berr,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_stprfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const float* ap, const float* b,
lapack_int* ldb, const float* x, lapack_int* ldx,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dtprfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const double* ap, const double* b,
lapack_int* ldb, const double* x, lapack_int* ldx,
double* ferr, double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_ctprfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* ap,
const lapack_complex_float* b, lapack_int* ldb,
const lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztprfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* nrhs, const lapack_complex_double* ap,
const lapack_complex_double* b, lapack_int* ldb,
const lapack_complex_double* x, lapack_int* ldx,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_stbrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs, const float* ab,
lapack_int* ldab, const float* b, lapack_int* ldb,
const float* x, lapack_int* ldx, float* ferr, float* berr,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dtbrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs, const double* ab,
lapack_int* ldab, const double* b, lapack_int* ldb,
const double* x, lapack_int* ldx, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_ctbrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs,
const lapack_complex_float* ab, lapack_int* ldab,
const lapack_complex_float* b, lapack_int* ldb,
const lapack_complex_float* x, lapack_int* ldx, float* ferr,
float* berr, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztbrfs( char* uplo, char* trans, char* diag, lapack_int* n,
lapack_int* kd, lapack_int* nrhs,
const lapack_complex_double* ab, lapack_int* ldab,
const lapack_complex_double* b, lapack_int* ldb,
const lapack_complex_double* x, lapack_int* ldx,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_sgetri( lapack_int* n, float* a, lapack_int* lda,
const lapack_int* ipiv, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgetri( lapack_int* n, double* a, lapack_int* lda,
const lapack_int* ipiv, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgetri( lapack_int* n, lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zgetri( lapack_int* n, lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
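/* Usage sketch (illustrative): explicit inverse from the LU factors with
 * LAPACK_dgetri, again using the lwork = -1 workspace query. Forming an
 * explicit inverse is rarely needed; prefer the *trs solves when possible.
 * The malloc/free pair assumes <stdlib.h> is included by the caller.
 *
 *   lapack_int n = 2, lda = 2, ipiv[2], lwork = -1, info;
 *   double a[4] = { 4.0, 6.0, 3.0, 3.0 }, wkopt;
 *   LAPACK_dgetrf( &n, &n, a, &lda, ipiv, &info );
 *   LAPACK_dgetri( &n, a, &lda, ipiv, &wkopt, &lwork, &info );
 *   lwork = (lapack_int)wkopt;
 *   double* work = (double*)malloc( lwork * sizeof(double) );
 *   LAPACK_dgetri( &n, a, &lda, ipiv, work, &lwork, &info );
 *   free( work );
 */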
void LAPACK_spotri( char* uplo, lapack_int* n, float* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dpotri( char* uplo, lapack_int* n, double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_cpotri( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_zpotri( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int *info );
void LAPACK_dpftri( char* transr, char* uplo, lapack_int* n, double* a,
lapack_int *info );
void LAPACK_spftri( char* transr, char* uplo, lapack_int* n, float* a,
lapack_int *info );
void LAPACK_zpftri( char* transr, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int *info );
void LAPACK_cpftri( char* transr, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int *info );
void LAPACK_spptri( char* uplo, lapack_int* n, float* ap, lapack_int *info );
void LAPACK_dpptri( char* uplo, lapack_int* n, double* ap, lapack_int *info );
void LAPACK_cpptri( char* uplo, lapack_int* n, lapack_complex_float* ap,
lapack_int *info );
void LAPACK_zpptri( char* uplo, lapack_int* n, lapack_complex_double* ap,
lapack_int *info );
void LAPACK_ssytri( char* uplo, lapack_int* n, float* a, lapack_int* lda,
const lapack_int* ipiv, float* work, lapack_int *info );
void LAPACK_dsytri( char* uplo, lapack_int* n, double* a, lapack_int* lda,
const lapack_int* ipiv, double* work, lapack_int *info );
void LAPACK_csytri( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zsytri( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_double* work, lapack_int *info );
void LAPACK_chetri( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zhetri( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_double* work, lapack_int *info );
void LAPACK_ssptri( char* uplo, lapack_int* n, float* ap,
const lapack_int* ipiv, float* work, lapack_int *info );
void LAPACK_dsptri( char* uplo, lapack_int* n, double* ap,
const lapack_int* ipiv, double* work, lapack_int *info );
void LAPACK_csptri( char* uplo, lapack_int* n, lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zsptri( char* uplo, lapack_int* n, lapack_complex_double* ap,
const lapack_int* ipiv, lapack_complex_double* work,
lapack_int *info );
void LAPACK_chptri( char* uplo, lapack_int* n, lapack_complex_float* ap,
const lapack_int* ipiv, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zhptri( char* uplo, lapack_int* n, lapack_complex_double* ap,
const lapack_int* ipiv, lapack_complex_double* work,
lapack_int *info );
void LAPACK_strtri( char* uplo, char* diag, lapack_int* n, float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_dtrtri( char* uplo, char* diag, lapack_int* n, double* a,
lapack_int* lda, lapack_int *info );
void LAPACK_ctrtri( char* uplo, char* diag, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_int *info );
void LAPACK_ztrtri( char* uplo, char* diag, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dtftri( char* transr, char* uplo, char* diag, lapack_int* n,
double* a, lapack_int *info );
void LAPACK_stftri( char* transr, char* uplo, char* diag, lapack_int* n,
float* a, lapack_int *info );
void LAPACK_ztftri( char* transr, char* uplo, char* diag, lapack_int* n,
lapack_complex_double* a, lapack_int *info );
void LAPACK_ctftri( char* transr, char* uplo, char* diag, lapack_int* n,
lapack_complex_float* a, lapack_int *info );
void LAPACK_stptri( char* uplo, char* diag, lapack_int* n, float* ap,
lapack_int *info );
void LAPACK_dtptri( char* uplo, char* diag, lapack_int* n, double* ap,
lapack_int *info );
void LAPACK_ctptri( char* uplo, char* diag, lapack_int* n,
lapack_complex_float* ap, lapack_int *info );
void LAPACK_ztptri( char* uplo, char* diag, lapack_int* n,
lapack_complex_double* ap, lapack_int *info );
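/* Equilibration routines. The geequ/gbequ/poequ/ppequ/pbequ families below
   compute row and column scalings that reduce the condition number of a
   matrix before factorization; the equb variants restrict the scale
   factors to powers of the radix so that scaling introduces no rounding
   error, and syequb/heequb handle symmetric/Hermitian matrices. */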
void LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a,
lapack_int* lda, float* r, float* c, float* rowcnd,
float* colcnd, float* amax, lapack_int *info );
void LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a,
lapack_int* lda, double* r, double* c, double* rowcnd,
double* colcnd, double* amax, lapack_int *info );
void LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* r, float* c, float* rowcnd,
float* colcnd, float* amax, lapack_int *info );
void LAPACK_zgeequ( lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda, double* r,
double* c, double* rowcnd, double* colcnd, double* amax,
lapack_int *info );
void LAPACK_dgeequb( lapack_int* m, lapack_int* n, const double* a,
lapack_int* lda, double* r, double* c, double* rowcnd,
double* colcnd, double* amax, lapack_int *info );
void LAPACK_sgeequb( lapack_int* m, lapack_int* n, const float* a,
lapack_int* lda, float* r, float* c, float* rowcnd,
float* colcnd, float* amax, lapack_int *info );
void LAPACK_zgeequb( lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda, double* r,
double* c, double* rowcnd, double* colcnd, double* amax,
lapack_int *info );
void LAPACK_cgeequb( lapack_int* m, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda, float* r,
float* c, float* rowcnd, float* colcnd, float* amax,
lapack_int *info );
void LAPACK_sgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const float* ab, lapack_int* ldab, float* r,
float* c, float* rowcnd, float* colcnd, float* amax,
lapack_int *info );
void LAPACK_dgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const double* ab, lapack_int* ldab,
double* r, double* c, double* rowcnd, double* colcnd,
double* amax, lapack_int *info );
void LAPACK_cgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const lapack_complex_float* ab,
lapack_int* ldab, float* r, float* c, float* rowcnd,
float* colcnd, float* amax, lapack_int *info );
void LAPACK_zgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const lapack_complex_double* ab,
lapack_int* ldab, double* r, double* c, double* rowcnd,
double* colcnd, double* amax, lapack_int *info );
void LAPACK_dgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const double* ab, lapack_int* ldab,
double* r, double* c, double* rowcnd, double* colcnd,
double* amax, lapack_int *info );
void LAPACK_sgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const float* ab, lapack_int* ldab,
float* r, float* c, float* rowcnd, float* colcnd,
float* amax, lapack_int *info );
void LAPACK_zgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const lapack_complex_double* ab,
lapack_int* ldab, double* r, double* c, double* rowcnd,
double* colcnd, double* amax, lapack_int *info );
void LAPACK_cgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const lapack_complex_float* ab,
lapack_int* ldab, float* r, float* c, float* rowcnd,
float* colcnd, float* amax, lapack_int *info );
void LAPACK_spoequ( lapack_int* n, const float* a, lapack_int* lda, float* s,
float* scond, float* amax, lapack_int *info );
void LAPACK_dpoequ( lapack_int* n, const double* a, lapack_int* lda, double* s,
double* scond, double* amax, lapack_int *info );
void LAPACK_cpoequ( lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* s, float* scond, float* amax,
lapack_int *info );
void LAPACK_zpoequ( lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* s, double* scond, double* amax,
lapack_int *info );
void LAPACK_dpoequb( lapack_int* n, const double* a, lapack_int* lda, double* s,
double* scond, double* amax, lapack_int *info );
void LAPACK_spoequb( lapack_int* n, const float* a, lapack_int* lda, float* s,
float* scond, float* amax, lapack_int *info );
void LAPACK_zpoequb( lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* s, double* scond, double* amax,
lapack_int *info );
void LAPACK_cpoequb( lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* s, float* scond, float* amax,
lapack_int *info );
void LAPACK_sppequ( char* uplo, lapack_int* n, const float* ap, float* s,
float* scond, float* amax, lapack_int *info );
void LAPACK_dppequ( char* uplo, lapack_int* n, const double* ap, double* s,
double* scond, double* amax, lapack_int *info );
void LAPACK_cppequ( char* uplo, lapack_int* n, const lapack_complex_float* ap,
float* s, float* scond, float* amax, lapack_int *info );
void LAPACK_zppequ( char* uplo, lapack_int* n, const lapack_complex_double* ap,
double* s, double* scond, double* amax, lapack_int *info );
void LAPACK_spbequ( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,
lapack_int* ldab, float* s, float* scond, float* amax,
lapack_int *info );
void LAPACK_dpbequ( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,
lapack_int* ldab, double* s, double* scond, double* amax,
lapack_int *info );
void LAPACK_cpbequ( char* uplo, lapack_int* n, lapack_int* kd,
const lapack_complex_float* ab, lapack_int* ldab, float* s,
float* scond, float* amax, lapack_int *info );
void LAPACK_zpbequ( char* uplo, lapack_int* n, lapack_int* kd,
const lapack_complex_double* ab, lapack_int* ldab,
double* s, double* scond, double* amax, lapack_int *info );
void LAPACK_dsyequb( char* uplo, lapack_int* n, const double* a,
lapack_int* lda, double* s, double* scond, double* amax,
double* work, lapack_int *info );
void LAPACK_ssyequb( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
float* s, float* scond, float* amax, float* work,
lapack_int *info );
void LAPACK_zsyequb( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* s, double* scond, double* amax,
lapack_complex_double* work, lapack_int *info );
void LAPACK_csyequb( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* s, float* scond, float* amax,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zheequb( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, double* s, double* scond, double* amax,
lapack_complex_double* work, lapack_int *info );
void LAPACK_cheequb( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, float* s, float* scond, float* amax,
lapack_complex_float* work, lapack_int *info );
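/* Linear-system driver routines. The simple drivers (gesv, gbsv, gtsv,
   ...) factorize A and solve A*X = B in a single call; the expert svx
   drivers additionally equilibrate, estimate the reciprocal condition
   number rcond and return forward/backward error bounds; the svxx
   drivers use extra precision during iterative refinement. */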
void LAPACK_sgesv( lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda,
lapack_int* ipiv, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,
lapack_int* ipiv, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* a,
lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
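/* Illustrative sketch (not part of the original header): solving A*x = b
   with the simple driver dgesv. The Fortran interface expects
   column-major storage and every scalar argument passed by pointer; on
   exit a holds the LU factors and b is overwritten with the solution. */
#if 0
#include <stdio.h>
static void example_dgesv(void)
{
    lapack_int n = 3, nrhs = 1, lda = 3, ldb = 3, info;
    lapack_int ipiv[3];
    double a[9] = { 4, 1, 2,   1, 3, 0,   2, 0, 5 };  /* 3x3, column-major */
    double b[3] = { 1, 2, 3 };                        /* right-hand side   */
    LAPACK_dgesv(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
    if (info == 0)
        printf("x = [%g %g %g]\n", b[0], b[1], b[2]);
}
#endif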
void LAPACK_dsgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,
lapack_int* ipiv, double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* work, float* swork,
lapack_int* iter, lapack_int *info );
void LAPACK_zcgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
lapack_complex_double* work, lapack_complex_float* swork,
double* rwork, lapack_int* iter, lapack_int *info );
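/* dsgesv and zcgesv are mixed-precision drivers: they factorize in single
   precision (using swork) and refine the solution back to double
   precision. iter returns the number of refinement steps taken, or a
   negative value when the routine fell back to a plain double-precision
   factorization. */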
void LAPACK_sgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* r, float* c, float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int* ldb, double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_cgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_dgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int* ldb, double* x, lapack_int* ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int* n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int* nparams, double* params,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_sgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_zgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* r, float* c,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, float* ab, lapack_int* ldab,
lapack_int* ipiv, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, double* ab, lapack_int* ldab,
lapack_int* ipiv, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab,
lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,
lapack_int* nrhs, lapack_complex_double* ab,
lapack_int* ldab, lapack_int* ipiv, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_sgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, float* ab,
lapack_int* ldab, float* afb, lapack_int* ldafb,
lapack_int* ipiv, char* equed, float* r, float* c, float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, double* ab,
lapack_int* ldab, double* afb, lapack_int* ldafb,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int* ldb, double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_cgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,
lapack_int* ldab, lapack_complex_float* afb,
lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,
float* c, lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab,
lapack_int* ldab, lapack_complex_double* afb,
lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r,
double* c, lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_dgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, double* ab,
lapack_int* ldab, double* afb, lapack_int* ldafb,
lapack_int* ipiv, char* equed, double* r, double* c,
double* b, lapack_int* ldb, double* x, lapack_int* ldx,
double* rcond, double* rpvgrw, double* berr,
lapack_int* n_err_bnds, double* err_bnds_norm,
double* err_bnds_comp, lapack_int* nparams, double* params,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_sgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, float* ab,
lapack_int* ldab, float* afb, lapack_int* ldafb,
lapack_int* ipiv, char* equed, float* r, float* c,
float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* rcond, float* rpvgrw, float* berr,
lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_zgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs,
lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* afb, lapack_int* ldafb,
lapack_int* ipiv, char* equed, double* r, double* c,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,
lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,
lapack_int* ldab, lapack_complex_float* afb,
lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,
float* c, lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sgtsv( lapack_int* n, lapack_int* nrhs, float* dl, float* d,
float* du, float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_dgtsv( lapack_int* n, lapack_int* nrhs, double* dl, double* d,
double* du, double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_cgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* dl,
lapack_complex_float* d, lapack_complex_float* du,
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_zgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* dl,
lapack_complex_double* d, lapack_complex_double* du,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
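/* Illustrative sketch (not part of the original header): solving a
   tridiagonal system with the simple driver dgtsv. dl and du hold the
   n-1 sub- and super-diagonal entries; all four arrays are overwritten,
   with the solution returned in b. */
#if 0
#include <stdio.h>
static void example_dgtsv(void)
{
    lapack_int n = 4, nrhs = 1, ldb = 4, info;
    double dl[3] = { -1, -1, -1 };        /* sub-diagonal    */
    double d[4]  = {  2,  2,  2,  2 };    /* main diagonal   */
    double du[3] = { -1, -1, -1 };        /* super-diagonal  */
    double b[4]  = {  1,  0,  0,  1 };    /* right-hand side */
    LAPACK_dgtsv(&n, &nrhs, dl, d, du, b, &ldb, &info);
    if (info == 0)
        printf("x = [%g %g %g %g]\n", b[0], b[1], b[2], b[3]);
}
#endif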
void LAPACK_sgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
const float* dl, const float* d, const float* du,
float* dlf, float* df, float* duf, float* du2,
lapack_int* ipiv, const float* b, lapack_int* ldb, float* x,
lapack_int* ldx, float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
const double* dl, const double* d, const double* du,
double* dlf, double* df, double* duf, double* du2,
lapack_int* ipiv, const double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* rcond, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* dl,
const lapack_complex_float* d,
const lapack_complex_float* du, lapack_complex_float* dlf,
lapack_complex_float* df, lapack_complex_float* duf,
lapack_complex_float* du2, lapack_int* ipiv,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* dl,
const lapack_complex_double* d,
const lapack_complex_double* du, lapack_complex_double* dlf,
lapack_complex_double* df, lapack_complex_double* duf,
lapack_complex_double* du2, lapack_int* ipiv,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
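/* Drivers for symmetric/Hermitian positive definite systems based on the
   Cholesky factorization: posv (full storage), ppsv (packed), pbsv
   (banded) and ptsv (tridiagonal), each with an expert svx variant and,
   for posv, mixed-precision (dsposv/zcposv) and extra-precise (posvxx)
   versions. */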
void LAPACK_sposv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,
lapack_int* lda, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cposv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_zposv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
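/* Illustrative sketch (not part of the original header): solving a
   symmetric positive definite system with dposv. Only the triangle
   selected by uplo is referenced; on exit a holds the Cholesky factor
   and b holds the solution. */
#if 0
static lapack_int solve_spd(lapack_int n, lapack_int nrhs, double* a,
                            lapack_int lda, double* b, lapack_int ldb)
{
    char uplo = 'U';  /* upper triangle of a is stored (column-major) */
    lapack_int info;
    LAPACK_dposv(&uplo, &n, &nrhs, a, &lda, b, &ldb, &info);
    return info;      /* info > 0: matrix is not positive definite */
}
#endif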
void LAPACK_dsposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* work, float* swork,
lapack_int* iter, lapack_int *info );
void LAPACK_zcposv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx,
lapack_complex_double* work, lapack_complex_float* swork,
double* rwork, lapack_int* iter, lapack_int *info );
void LAPACK_sposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* af, lapack_int* ldaf,
char* equed, float* s, float* b, lapack_int* ldb, float* x,
lapack_int* ldx, float* rcond, float* ferr, float* berr,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* af, lapack_int* ldaf,
char* equed, double* s, double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* rcond, double* ferr,
double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf, char* equed,
float* s, lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf, char* equed,
double* s, lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_dposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* af, lapack_int* ldaf,
char* equed, double* s, double* b, lapack_int* ldb,
double* x, lapack_int* ldx, double* rcond, double* rpvgrw,
double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_sposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* af, lapack_int* ldaf,
char* equed, float* s, float* b, lapack_int* ldb, float* x,
lapack_int* ldx, float* rcond, float* rpvgrw, float* berr,
lapack_int* n_err_bnds, float* err_bnds_norm,
float* err_bnds_comp, lapack_int* nparams, float* params,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_zposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf, char* equed,
double* s, lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_cposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf, char* equed,
float* s, lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sppsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,
float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_dppsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,
double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_cppsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* ap, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zppsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* ap, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_sppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
float* ap, float* afp, char* equed, float* s, float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
double* ap, double* afp, char* equed, double* s, double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* ap, lapack_complex_float* afp,
char* equed, float* s, lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* ap, lapack_complex_double* afp,
char* equed, double* s, lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_spbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
float* ab, lapack_int* ldab, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
double* ab, lapack_int* ldab, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_zpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,
lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_spbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,
lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb,
lapack_int* ldafb, char* equed, float* s, float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,
lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb,
lapack_int* ldafb, char* equed, double* s, double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_cpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,
lapack_int* nrhs, lapack_complex_float* ab,
lapack_int* ldab, lapack_complex_float* afb,
lapack_int* ldafb, char* equed, float* s,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,
lapack_int* nrhs, lapack_complex_double* ab,
lapack_int* ldab, lapack_complex_double* afb,
lapack_int* ldafb, char* equed, double* s,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_sptsv( lapack_int* n, lapack_int* nrhs, float* d, float* e,
float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_dptsv( lapack_int* n, lapack_int* nrhs, double* d, double* e,
double* b, lapack_int* ldb, lapack_int *info );
void LAPACK_cptsv( lapack_int* n, lapack_int* nrhs, float* d,
lapack_complex_float* e, lapack_complex_float* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_zptsv( lapack_int* n, lapack_int* nrhs, double* d,
lapack_complex_double* e, lapack_complex_double* b,
lapack_int* ldb, lapack_int *info );
void LAPACK_sptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d,
const float* e, float* df, float* ef, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int *info );
void LAPACK_dptsvx( char* fact, lapack_int* n, lapack_int* nrhs,
const double* d, const double* e, double* df, double* ef,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* rcond, double* ferr, double* berr,
double* work, lapack_int *info );
void LAPACK_cptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d,
const lapack_complex_float* e, float* df,
lapack_complex_float* ef, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zptsvx( char* fact, lapack_int* n, lapack_int* nrhs,
const double* d, const lapack_complex_double* e, double* df,
lapack_complex_double* ef, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork,
lapack_int *info );
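/* Drivers for symmetric/Hermitian indefinite systems using the
   Bunch-Kaufman diagonal-pivoting factorization: sysv/hesv for full
   storage and spsv/hpsv for packed storage, again with expert svx and
   extra-precise svxx variants. */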
void LAPACK_ssysv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,
lapack_int* lda, lapack_int* ipiv, float* b, lapack_int* ldb,
float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dsysv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, lapack_int* ipiv, double* b,
lapack_int* ldb, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_csysv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zsysv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_ssysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const float* a, lapack_int* lda, float* af,
lapack_int* ldaf, lapack_int* ipiv, const float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* ferr, float* berr, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_dsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const double* a, lapack_int* lda, double* af,
lapack_int* ldaf, lapack_int* ipiv, const double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,
double* ferr, double* berr, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_csysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
void LAPACK_dsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* s, double* b,
lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params, double* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_ssysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* s, float* b,
lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_zsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_csysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_chesv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zhesv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_chesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zhesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
void LAPACK_zhesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, double* s,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* x, lapack_int* ldx, double* rcond,
double* rpvgrw, double* berr, lapack_int* n_err_bnds,
double* err_bnds_norm, double* err_bnds_comp,
lapack_int* nparams, double* params,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_chesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* af, lapack_int* ldaf,
lapack_int* ipiv, char* equed, float* s,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* x, lapack_int* ldx, float* rcond,
float* rpvgrw, float* berr, lapack_int* n_err_bnds,
float* err_bnds_norm, float* err_bnds_comp,
lapack_int* nparams, float* params,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_sspsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,
lapack_int* ipiv, float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dspsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,
lapack_int* ipiv, double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_cspsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* ap, lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_zspsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* ap, lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_sspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const float* ap, float* afp, lapack_int* ipiv,
const float* b, lapack_int* ldb, float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr, float* work,
lapack_int* iwork, lapack_int *info );
void LAPACK_dspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const double* ap, double* afp, lapack_int* ipiv,
const double* b, lapack_int* ldb, double* x,
lapack_int* ldx, double* rcond, double* ferr, double* berr,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap, lapack_complex_float* afp,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap, lapack_complex_double* afp,
lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_chpsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* ap, lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );
void LAPACK_zhpsv( char* uplo, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* ap, lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_chpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_float* ap, lapack_complex_float* afp,
lapack_int* ipiv, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,
float* rcond, float* ferr, float* berr,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zhpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,
const lapack_complex_double* ap, lapack_complex_double* afp,
lapack_int* ipiv, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,
double* rcond, double* ferr, double* berr,
lapack_complex_double* work, double* rwork,
lapack_int *info );
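/* Orthogonal/unitary factorizations. geqrf computes a QR factorization,
   with column pivoting in geqpf (the older routine) and geqp3 (its
   blocked, Level-3 BLAS successor). The orgqr/ungqr routines form the
   factor Q explicitly, while ormqr/unmqr apply Q to another matrix
   without forming it; the LQ, QL, RQ and RZ factorizations that follow
   use the same org/ung and orm/unm pattern. */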
void LAPACK_sgeqrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgeqrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgeqrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgeqrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
lapack_int* jpvt, float* tau, float* work,
lapack_int *info );
void LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
lapack_int* jpvt, double* tau, double* work,
lapack_int *info );
void LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* jpvt,
lapack_complex_float* tau, lapack_complex_float* work,
float* rwork, lapack_int *info );
void LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* jpvt,
lapack_complex_double* tau, lapack_complex_double* work,
double* rwork, lapack_int *info );
void LAPACK_sgeqp3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
lapack_int* jpvt, float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dgeqp3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
lapack_int* jpvt, double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cgeqp3( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* jpvt,
lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int *info );
void LAPACK_zgeqp3( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* jpvt,
lapack_complex_double* tau, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int *info );
void LAPACK_sorgqr( lapack_int* m, lapack_int* n, lapack_int* k, float* a,
lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorgqr( lapack_int* m, lapack_int* n, lapack_int* k, double* a,
lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
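/* Illustrative sketch (not part of the original header): QR-factoring a
   column-major m x n matrix (m >= n assumed) and forming the explicit
   thin Q with dgeqrf followed by dorgqr. The workspace obtained from the
   dgeqrf query is reused for dorgqr; its size is at least n, which meets
   dorgqr's minimum requirement, though it may not be optimal for it. */
#if 0
#include <stdlib.h>
static lapack_int qr_explicit_q(lapack_int m, lapack_int n, double* a,
                                lapack_int lda, double* tau /* length n */)
{
    lapack_int info, lwork = -1, k = n;
    double wkopt;
    LAPACK_dgeqrf(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);  /* query */
    if (info != 0) return info;
    lwork = (lapack_int)wkopt;
    double* work = (double*)malloc((size_t)lwork * sizeof(double));
    LAPACK_dgeqrf(&m, &n, a, &lda, tau, work, &lwork, &info);    /* R in upper a */
    if (info == 0)
        LAPACK_dorgqr(&m, &n, &k, a, &lda, tau, work, &lwork, &info); /* Q in a */
    free(work);
    return info;
}
#endif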
void LAPACK_sormqr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const float* a, lapack_int* lda,
const float* tau, float* c, lapack_int* ldc, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dormqr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const double* a, lapack_int* lda,
const double* tau, double* c, lapack_int* ldc, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cungqr( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zungqr( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sgelqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgelqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgelqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgelqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sorglq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,
lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorglq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,
lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sormlq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const float* a, lapack_int* lda,
const float* tau, float* c, lapack_int* ldc, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dormlq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const double* a, lapack_int* lda,
const double* tau, double* c, lapack_int* ldc, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cunglq( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zunglq( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sgeqlf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgeqlf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgeqlf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgeqlf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sorgql( lapack_int* m, lapack_int* n, lapack_int* k, float* a,
lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorgql( lapack_int* m, lapack_int* n, lapack_int* k, double* a,
lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cungql( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zungql( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sormql( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const float* a, lapack_int* lda,
const float* tau, float* c, lapack_int* ldc, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dormql( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const double* a, lapack_int* lda,
const double* tau, double* c, lapack_int* ldc, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cunmql( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmql( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sgerqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgerqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgerqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgerqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sorgrq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,
lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorgrq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,
lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cungrq( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zungrq( lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sormrq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const float* a, lapack_int* lda,
const float* tau, float* c, lapack_int* ldc, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dormrq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const double* a, lapack_int* lda,
const double* tau, double* c, lapack_int* ldc, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_stzrzf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dtzrzf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_ctzrzf( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_ztzrzf( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sormrz( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, const float* a,
lapack_int* lda, const float* tau, float* c,
lapack_int* ldc, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dormrz( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, const double* a,
lapack_int* lda, const double* tau, double* c,
lapack_int* ldc, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau, lapack_complex_double* c,
lapack_int* ldc, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
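/* Generalized factorizations of a matrix pair: ggqrf computes a
   generalized QR factorization of (A, B) and ggrqf the generalized RQ
   factorization, used for example by the constrained least-squares
   solvers. */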
void LAPACK_sggqrf( lapack_int* n, lapack_int* m, lapack_int* p, float* a,
lapack_int* lda, float* taua, float* b, lapack_int* ldb,
float* taub, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dggqrf( lapack_int* n, lapack_int* m, lapack_int* p, double* a,
lapack_int* lda, double* taua, double* b, lapack_int* ldb,
double* taub, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cggqrf( lapack_int* n, lapack_int* m, lapack_int* p,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* taua, lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* taub,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zggqrf( lapack_int* n, lapack_int* m, lapack_int* p,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* taua, lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* taub,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sggrqf( lapack_int* m, lapack_int* p, lapack_int* n, float* a,
lapack_int* lda, float* taua, float* b, lapack_int* ldb,
float* taub, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dggrqf( lapack_int* m, lapack_int* p, lapack_int* n, double* a,
lapack_int* lda, double* taua, double* b, lapack_int* ldb,
double* taub, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cggrqf( lapack_int* m, lapack_int* p, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* taua, lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* taub,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zggrqf( lapack_int* m, lapack_int* p, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* taua, lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* taub,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
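/* Reduction to bidiagonal form, the first stage of the SVD: gebrd (full
   storage) and gbbrd (banded) reduce A to bidiagonal form; orgbr/ungbr
   generate and ormbr/unmbr apply the orthogonal factors; bdsqr (QR
   iteration) and bdsdc (divide and conquer) then compute the SVD of the
   bidiagonal matrix. */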
void LAPACK_sgebrd( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* d, float* e, float* tauq, float* taup, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dgebrd( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* d, double* e, double* tauq, double* taup,
double* work, lapack_int* lwork, lapack_int *info );
void LAPACK_cgebrd( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, float* d, float* e,
lapack_complex_float* tauq, lapack_complex_float* taup,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgebrd( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, double* d, double* e,
lapack_complex_double* tauq, lapack_complex_double* taup,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,
lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab,
float* d, float* e, float* q, lapack_int* ldq, float* pt,
lapack_int* ldpt, float* c, lapack_int* ldc, float* work,
lapack_int *info );
void LAPACK_dgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,
lapack_int* kl, lapack_int* ku, double* ab,
lapack_int* ldab, double* d, double* e, double* q,
lapack_int* ldq, double* pt, lapack_int* ldpt, double* c,
lapack_int* ldc, double* work, lapack_int *info );
void LAPACK_cgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,
lapack_int* kl, lapack_int* ku, lapack_complex_float* ab,
lapack_int* ldab, float* d, float* e,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* pt, lapack_int* ldpt,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,
lapack_int* kl, lapack_int* ku, lapack_complex_double* ab,
lapack_int* ldab, double* d, double* e,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* pt, lapack_int* ldpt,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,
float* a, lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,
double* a, lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sormbr( char* vect, char* side, char* trans, lapack_int* m,
lapack_int* n, lapack_int* k, const float* a,
lapack_int* lda, const float* tau, float* c,
lapack_int* ldc, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dormbr( char* vect, char* side, char* trans, lapack_int* m,
lapack_int* n, lapack_int* k, const double* a,
lapack_int* lda, const double* tau, double* c,
lapack_int* ldc, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmbr( char* vect, char* side, char* trans, lapack_int* m,
lapack_int* n, lapack_int* k, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmbr( char* vect, char* side, char* trans, lapack_int* m,
lapack_int* n, lapack_int* k,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau, lapack_complex_double* c,
lapack_int* ldc, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,
lapack_int* nru, lapack_int* ncc, float* d, float* e,
float* vt, lapack_int* ldvt, float* u, lapack_int* ldu,
float* c, lapack_int* ldc, float* work, lapack_int *info );
void LAPACK_dbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,
lapack_int* nru, lapack_int* ncc, double* d, double* e,
double* vt, lapack_int* ldvt, double* u, lapack_int* ldu,
double* c, lapack_int* ldc, double* work,
lapack_int *info );
void LAPACK_cbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,
lapack_int* nru, lapack_int* ncc, float* d, float* e,
lapack_complex_float* vt, lapack_int* ldvt,
lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* c, lapack_int* ldc, float* work,
lapack_int *info );
void LAPACK_zbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,
lapack_int* nru, lapack_int* ncc, double* d, double* e,
lapack_complex_double* vt, lapack_int* ldvt,
lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* c, lapack_int* ldc, double* work,
lapack_int *info );
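/* bdsdc: singular value decomposition of a real bidiagonal matrix via
   divide and conquer */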
void LAPACK_sbdsdc( char* uplo, char* compq, lapack_int* n, float* d, float* e,
float* u, lapack_int* ldu, float* vt, lapack_int* ldvt,
float* q, lapack_int* iq, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dbdsdc( char* uplo, char* compq, lapack_int* n, double* d,
double* e, double* u, lapack_int* ldu, double* vt,
lapack_int* ldvt, double* q, lapack_int* iq, double* work,
lapack_int* iwork, lapack_int *info );
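/* sytrd/hetrd: reduce a symmetric/Hermitian matrix to real tridiagonal
   form; orgtr/ungtr generate and ormtr/unmtr apply the accumulated
   orthogonal/unitary factor */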
void LAPACK_ssytrd( char* uplo, lapack_int* n, float* a, lapack_int* lda,
float* d, float* e, float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dsytrd( char* uplo, lapack_int* n, double* a, lapack_int* lda,
double* d, double* e, double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sorgtr( char* uplo, lapack_int* n, float* a, lapack_int* lda,
const float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dorgtr( char* uplo, lapack_int* n, double* a, lapack_int* lda,
const double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_sormtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const float* a, lapack_int* lda,
const float* tau, float* c, lapack_int* ldc, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dormtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const double* a, lapack_int* lda,
const double* tau, double* c, lapack_int* ldc, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_chetrd( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, float* d, float* e,
lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zhetrd( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, double* d, double* e,
lapack_complex_double* tau, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cungtr( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zungtr( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zunmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
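/* Packed-storage variants: sptrd/hptrd reduce, opgtr/upgtr generate,
   opmtr/upmtr apply */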
void LAPACK_ssptrd( char* uplo, lapack_int* n, float* ap, float* d, float* e,
float* tau, lapack_int *info );
void LAPACK_dsptrd( char* uplo, lapack_int* n, double* ap, double* d, double* e,
double* tau, lapack_int *info );
void LAPACK_sopgtr( char* uplo, lapack_int* n, const float* ap,
const float* tau, float* q, lapack_int* ldq, float* work,
lapack_int *info );
void LAPACK_dopgtr( char* uplo, lapack_int* n, const double* ap,
const double* tau, double* q, lapack_int* ldq, double* work,
lapack_int *info );
void LAPACK_sopmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const float* ap, const float* tau, float* c,
lapack_int* ldc, float* work, lapack_int *info );
void LAPACK_dopmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const double* ap, const double* tau,
double* c, lapack_int* ldc, double* work,
lapack_int *info );
void LAPACK_chptrd( char* uplo, lapack_int* n, lapack_complex_float* ap,
float* d, float* e, lapack_complex_float* tau,
lapack_int *info );
void LAPACK_zhptrd( char* uplo, lapack_int* n, lapack_complex_double* ap,
double* d, double* e, lapack_complex_double* tau,
lapack_int *info );
void LAPACK_cupgtr( char* uplo, lapack_int* n, const lapack_complex_float* ap,
const lapack_complex_float* tau, lapack_complex_float* q,
lapack_int* ldq, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zupgtr( char* uplo, lapack_int* n, const lapack_complex_double* ap,
const lapack_complex_double* tau, lapack_complex_double* q,
lapack_int* ldq, lapack_complex_double* work,
lapack_int *info );
void LAPACK_cupmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const lapack_complex_float* ap,
const lapack_complex_float* tau, lapack_complex_float* c,
lapack_int* ldc, lapack_complex_float* work,
lapack_int *info );
void LAPACK_zupmtr( char* side, char* uplo, char* trans, lapack_int* m,
lapack_int* n, const lapack_complex_double* ap,
const lapack_complex_double* tau, lapack_complex_double* c,
lapack_int* ldc, lapack_complex_double* work,
lapack_int *info );
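/* sbtrd/hbtrd: reduce a symmetric/Hermitian band matrix to tridiagonal form */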
void LAPACK_ssbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,
float* ab, lapack_int* ldab, float* d, float* e, float* q,
lapack_int* ldq, float* work, lapack_int *info );
void LAPACK_dsbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,
double* ab, lapack_int* ldab, double* d, double* e,
double* q, lapack_int* ldq, double* work,
lapack_int *info );
void LAPACK_chbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_float* ab, lapack_int* ldab, float* d,
float* e, lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zhbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_double* ab, lapack_int* ldab, double* d,
double* e, lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* work, lapack_int *info );
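/* Symmetric tridiagonal eigensolvers: sterf (Pal-Walker-Kahan QL/QR,
   eigenvalues only), steqr (QL/QR), stemr (MRRR), stedc (divide and
   conquer), stegr (compatibility wrapper around stemr), pteqr (positive
   definite), stebz (bisection), stein (inverse iteration) */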
void LAPACK_ssterf( lapack_int* n, float* d, float* e, lapack_int *info );
void LAPACK_dsterf( lapack_int* n, double* d, double* e, lapack_int *info );
void LAPACK_ssteqr( char* compz, lapack_int* n, float* d, float* e, float* z,
lapack_int* ldz, float* work, lapack_int *info );
void LAPACK_dsteqr( char* compz, lapack_int* n, double* d, double* e, double* z,
lapack_int* ldz, double* work, lapack_int *info );
void LAPACK_csteqr( char* compz, lapack_int* n, float* d, float* e,
lapack_complex_float* z, lapack_int* ldz, float* work,
lapack_int *info );
void LAPACK_zsteqr( char* compz, lapack_int* n, double* d, double* e,
lapack_complex_double* z, lapack_int* ldz, double* work,
lapack_int *info );
void LAPACK_sstemr( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
lapack_int* m, float* w, float* z, lapack_int* ldz,
lapack_int* nzc, lapack_int* isuppz, lapack_logical* tryrac,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_dstemr( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, lapack_int* m, double* w, double* z,
lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,
lapack_logical* tryrac, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_cstemr( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,
lapack_logical* tryrac, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_zstemr( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int* ldz, lapack_int* nzc,
lapack_int* isuppz, lapack_logical* tryrac, double* work,
lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_sstedc( char* compz, lapack_int* n, float* d, float* e, float* z,
lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dstedc( char* compz, lapack_int* n, double* d, double* e, double* z,
lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_cstedc( char* compz, lapack_int* n, float* d, float* e,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zstedc( char* compz, lapack_int* n, double* d, double* e,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_sstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w, float* z,
lapack_int* ldz, lapack_int* isuppz, float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_dstegr( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, lapack_int* isuppz,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_cstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_int* isuppz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_zstegr( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_int* isuppz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_spteqr( char* compz, lapack_int* n, float* d, float* e, float* z,
lapack_int* ldz, float* work, lapack_int *info );
void LAPACK_dpteqr( char* compz, lapack_int* n, double* d, double* e, double* z,
lapack_int* ldz, double* work, lapack_int *info );
void LAPACK_cpteqr( char* compz, lapack_int* n, float* d, float* e,
lapack_complex_float* z, lapack_int* ldz, float* work,
lapack_int *info );
void LAPACK_zpteqr( char* compz, lapack_int* n, double* d, double* e,
lapack_complex_double* z, lapack_int* ldz, double* work,
lapack_int *info );
void LAPACK_sstebz( char* range, char* order, lapack_int* n, float* vl,
float* vu, lapack_int* il, lapack_int* iu, float* abstol,
const float* d, const float* e, lapack_int* m,
lapack_int* nsplit, float* w, lapack_int* iblock,
lapack_int* isplit, float* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_dstebz( char* range, char* order, lapack_int* n, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
const double* d, const double* e, lapack_int* m,
lapack_int* nsplit, double* w, lapack_int* iblock,
lapack_int* isplit, double* work, lapack_int* iwork,
lapack_int *info );
void LAPACK_sstein( lapack_int* n, const float* d, const float* e,
lapack_int* m, const float* w, const lapack_int* iblock,
const lapack_int* isplit, float* z, lapack_int* ldz,
float* work, lapack_int* iwork, lapack_int* ifailv,
lapack_int *info );
void LAPACK_dstein( lapack_int* n, const double* d, const double* e,
lapack_int* m, const double* w, const lapack_int* iblock,
const lapack_int* isplit, double* z, lapack_int* ldz,
double* work, lapack_int* iwork, lapack_int* ifailv,
lapack_int *info );
void LAPACK_cstein( lapack_int* n, const float* d, const float* e,
lapack_int* m, const float* w, const lapack_int* iblock,
const lapack_int* isplit, lapack_complex_float* z,
lapack_int* ldz, float* work, lapack_int* iwork,
lapack_int* ifailv, lapack_int *info );
void LAPACK_zstein( lapack_int* n, const double* d, const double* e,
lapack_int* m, const double* w, const lapack_int* iblock,
const lapack_int* isplit, lapack_complex_double* z,
lapack_int* ldz, double* work, lapack_int* iwork,
lapack_int* ifailv, lapack_int *info );
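/* disna: reciprocal condition numbers for eigenvectors of a
   symmetric/Hermitian matrix or for singular vectors */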
void LAPACK_sdisna( char* job, lapack_int* m, lapack_int* n, const float* d,
float* sep, lapack_int *info );
void LAPACK_ddisna( char* job, lapack_int* m, lapack_int* n, const double* d,
double* sep, lapack_int *info );
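/* Generalized symmetric/Hermitian-definite eigenproblems: sygst/hegst
   (full), spgst/hpgst (packed), sbgst/hbgst (band) reduce to standard
   form; pbstf computes the split Cholesky factorization used by
   sbgst/hbgst */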
void LAPACK_ssygst( lapack_int* itype, char* uplo, lapack_int* n, float* a,
lapack_int* lda, const float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_dsygst( lapack_int* itype, char* uplo, lapack_int* n, double* a,
lapack_int* lda, const double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_chegst( lapack_int* itype, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_zhegst( lapack_int* itype, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* b, lapack_int* ldb,
lapack_int *info );
void LAPACK_sspgst( lapack_int* itype, char* uplo, lapack_int* n, float* ap,
const float* bp, lapack_int *info );
void LAPACK_dspgst( lapack_int* itype, char* uplo, lapack_int* n, double* ap,
const double* bp, lapack_int *info );
void LAPACK_chpgst( lapack_int* itype, char* uplo, lapack_int* n,
lapack_complex_float* ap, const lapack_complex_float* bp,
lapack_int *info );
void LAPACK_zhpgst( lapack_int* itype, char* uplo, lapack_int* n,
lapack_complex_double* ap, const lapack_complex_double* bp,
lapack_int *info );
void LAPACK_ssbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, float* ab, lapack_int* ldab,
const float* bb, lapack_int* ldbb, float* x,
lapack_int* ldx, float* work, lapack_int *info );
void LAPACK_dsbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, double* ab, lapack_int* ldab,
const double* bb, lapack_int* ldbb, double* x,
lapack_int* ldx, double* work, lapack_int *info );
void LAPACK_chbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
const lapack_complex_float* bb, lapack_int* ldbb,
lapack_complex_float* x, lapack_int* ldx,
lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zhbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
const lapack_complex_double* bb, lapack_int* ldbb,
lapack_complex_double* x, lapack_int* ldx,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_spbstf( char* uplo, lapack_int* n, lapack_int* kb, float* bb,
lapack_int* ldbb, lapack_int *info );
void LAPACK_dpbstf( char* uplo, lapack_int* n, lapack_int* kb, double* bb,
lapack_int* ldbb, lapack_int *info );
void LAPACK_cpbstf( char* uplo, lapack_int* n, lapack_int* kb,
lapack_complex_float* bb, lapack_int* ldbb,
lapack_int *info );
void LAPACK_zpbstf( char* uplo, lapack_int* n, lapack_int* kb,
lapack_complex_double* bb, lapack_int* ldbb,
lapack_int *info );
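/* Nonsymmetric eigenproblem: gehrd reduces a general matrix to upper
   Hessenberg form; orghr/unghr generate and ormhr/unmhr apply the
   orthogonal/unitary factor */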
void LAPACK_sgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,
lapack_int* lda, float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,
lapack_int* lda, double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* tau, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,
lapack_int* lda, const float* tau, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,
lapack_int* lda, const double* tau, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sormhr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, const float* a,
lapack_int* lda, const float* tau, float* c,
lapack_int* ldc, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dormhr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, const double* a,
lapack_int* lda, const double* tau, double* c,
lapack_int* ldc, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,
lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* ilo, lapack_int* ihi,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* tau, lapack_complex_float* c,
lapack_int* ldc, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* ilo, lapack_int* ihi,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* tau, lapack_complex_double* c,
lapack_int* ldc, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
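/* gebal balances a general matrix to improve eigenvalue accuracy;
   gebak back-transforms the computed eigenvectors */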
void LAPACK_sgebal( char* job, lapack_int* n, float* a, lapack_int* lda,
lapack_int* ilo, lapack_int* ihi, float* scale,
lapack_int *info );
void LAPACK_dgebal( char* job, lapack_int* n, double* a, lapack_int* lda,
lapack_int* ilo, lapack_int* ihi, double* scale,
lapack_int *info );
void LAPACK_cgebal( char* job, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* ilo, lapack_int* ihi,
float* scale, lapack_int *info );
void LAPACK_zgebal( char* job, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* ilo, lapack_int* ihi,
double* scale, lapack_int *info );
void LAPACK_sgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const float* scale, lapack_int* m,
float* v, lapack_int* ldv, lapack_int *info );
void LAPACK_dgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const double* scale, lapack_int* m,
double* v, lapack_int* ldv, lapack_int *info );
void LAPACK_cgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const float* scale, lapack_int* m,
lapack_complex_float* v, lapack_int* ldv,
lapack_int *info );
void LAPACK_zgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const double* scale, lapack_int* m,
lapack_complex_double* v, lapack_int* ldv,
lapack_int *info );
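/* hseqr computes eigenvalues and the Schur factorization of an upper
   Hessenberg matrix; hsein (inverse iteration) and trevc compute
   selected eigenvectors */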
void LAPACK_shseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, float* h, lapack_int* ldh, float* wr,
float* wi, float* z, lapack_int* ldz, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, double* h, lapack_int* ldh, double* wr,
double* wi, double* z, lapack_int* ldz, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_chseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, lapack_complex_float* h, lapack_int* ldh,
lapack_complex_float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, lapack_complex_double* h, lapack_int* ldh,
lapack_complex_double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_shsein( char* job, char* eigsrc, char* initv,
lapack_logical* select, lapack_int* n, const float* h,
lapack_int* ldh, float* wr, const float* wi, float* vl,
lapack_int* ldvl, float* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, float* work,
lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
void LAPACK_dhsein( char* job, char* eigsrc, char* initv,
lapack_logical* select, lapack_int* n, const double* h,
lapack_int* ldh, double* wr, const double* wi, double* vl,
lapack_int* ldvl, double* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, double* work,
lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
void LAPACK_chsein( char* job, char* eigsrc, char* initv,
const lapack_logical* select, lapack_int* n,
const lapack_complex_float* h, lapack_int* ldh,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int* ldvl, lapack_complex_float* vr,
lapack_int* ldvr, lapack_int* mm, lapack_int* m,
lapack_complex_float* work, float* rwork,
lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
void LAPACK_zhsein( char* job, char* eigsrc, char* initv,
const lapack_logical* select, lapack_int* n,
const lapack_complex_double* h, lapack_int* ldh,
lapack_complex_double* w, lapack_complex_double* vl,
lapack_int* ldvl, lapack_complex_double* vr,
lapack_int* ldvr, lapack_int* mm, lapack_int* m,
lapack_complex_double* work, double* rwork,
lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );
void LAPACK_strevc( char* side, char* howmny, lapack_logical* select,
lapack_int* n, const float* t, lapack_int* ldt, float* vl,
lapack_int* ldvl, float* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, float* work,
lapack_int *info );
void LAPACK_dtrevc( char* side, char* howmny, lapack_logical* select,
lapack_int* n, const double* t, lapack_int* ldt, double* vl,
lapack_int* ldvl, double* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, double* work,
lapack_int *info );
void LAPACK_ctrevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* vl, lapack_int* ldvl,
lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,
lapack_int* m, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztrevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* vl, lapack_int* ldvl,
lapack_complex_double* vr, lapack_int* ldvr, lapack_int* mm,
lapack_int* m, lapack_complex_double* work, double* rwork,
lapack_int *info );
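/* trsna estimates condition numbers of eigenvalues/eigenvectors;
   trexc and trsen reorder the Schur factorization; trsyl solves the
   Sylvester equation op(A)*X + isgn*X*op(B) = scale*C */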
void LAPACK_strsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const float* t, lapack_int* ldt,
const float* vl, lapack_int* ldvl, const float* vr,
lapack_int* ldvr, float* s, float* sep, lapack_int* mm,
lapack_int* m, float* work, lapack_int* ldwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_dtrsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const double* t, lapack_int* ldt,
const double* vl, lapack_int* ldvl, const double* vr,
lapack_int* ldvr, double* s, double* sep, lapack_int* mm,
lapack_int* m, double* work, lapack_int* ldwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_ctrsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_float* t,
lapack_int* ldt, const lapack_complex_float* vl,
lapack_int* ldvl, const lapack_complex_float* vr,
lapack_int* ldvr, float* s, float* sep, lapack_int* mm,
lapack_int* m, lapack_complex_float* work,
lapack_int* ldwork, float* rwork, lapack_int *info );
void LAPACK_ztrsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_double* t,
lapack_int* ldt, const lapack_complex_double* vl,
lapack_int* ldvl, const lapack_complex_double* vr,
lapack_int* ldvr, double* s, double* sep, lapack_int* mm,
lapack_int* m, lapack_complex_double* work,
lapack_int* ldwork, double* rwork, lapack_int *info );
void LAPACK_strexc( char* compq, lapack_int* n, float* t, lapack_int* ldt,
float* q, lapack_int* ldq, lapack_int* ifst,
lapack_int* ilst, float* work, lapack_int *info );
void LAPACK_dtrexc( char* compq, lapack_int* n, double* t, lapack_int* ldt,
double* q, lapack_int* ldq, lapack_int* ifst,
lapack_int* ilst, double* work, lapack_int *info );
void LAPACK_ctrexc( char* compq, lapack_int* n, lapack_complex_float* t,
lapack_int* ldt, lapack_complex_float* q, lapack_int* ldq,
lapack_int* ifst, lapack_int* ilst, lapack_int *info );
void LAPACK_ztrexc( char* compq, lapack_int* n, lapack_complex_double* t,
lapack_int* ldt, lapack_complex_double* q, lapack_int* ldq,
lapack_int* ifst, lapack_int* ilst, lapack_int *info );
void LAPACK_strsen( char* job, char* compq, const lapack_logical* select,
lapack_int* n, float* t, lapack_int* ldt, float* q,
lapack_int* ldq, float* wr, float* wi, lapack_int* m,
float* s, float* sep, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dtrsen( char* job, char* compq, const lapack_logical* select,
lapack_int* n, double* t, lapack_int* ldt, double* q,
lapack_int* ldq, double* wr, double* wi, lapack_int* m,
double* s, double* sep, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_ctrsen( char* job, char* compq, const lapack_logical* select,
lapack_int* n, lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* w, lapack_int* m, float* s,
float* sep, lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_ztrsen( char* job, char* compq, const lapack_logical* select,
lapack_int* n, lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* w, lapack_int* m, double* s,
double* sep, lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_strsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
lapack_int* n, const float* a, lapack_int* lda,
const float* b, lapack_int* ldb, float* c, lapack_int* ldc,
float* scale, lapack_int *info );
void LAPACK_dtrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
lapack_int* n, const double* a, lapack_int* lda,
const double* b, lapack_int* ldb, double* c,
lapack_int* ldc, double* scale, lapack_int *info );
void LAPACK_ctrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* b,
lapack_int* ldb, lapack_complex_float* c, lapack_int* ldc,
float* scale, lapack_int *info );
void LAPACK_ztrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,
lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* b,
lapack_int* ldb, lapack_complex_double* c, lapack_int* ldc,
double* scale, lapack_int *info );
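/* Generalized nonsymmetric eigenproblem: gghrd (Hessenberg-triangular
   reduction), ggbal/ggbak (balancing and back-transformation), hgeqz
   (QZ iteration), tgevc (eigenvectors), tgexc/tgsen (reordering),
   tgsyl (generalized Sylvester equation), tgsna (condition numbers) */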
void LAPACK_sgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, float* a, lapack_int* lda, float* b,
lapack_int* ldb, float* q, lapack_int* ldq, float* z,
lapack_int* ldz, lapack_int *info );
void LAPACK_dgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, double* a, lapack_int* lda, double* b,
lapack_int* ldb, double* q, lapack_int* ldq, double* z,
lapack_int* ldz, lapack_int *info );
void LAPACK_cgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* z, lapack_int* ldz,
lapack_int *info );
void LAPACK_zgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* z, lapack_int* ldz,
lapack_int *info );
void LAPACK_sggbal( char* job, lapack_int* n, float* a, lapack_int* lda,
float* b, lapack_int* ldb, lapack_int* ilo, lapack_int* ihi,
float* lscale, float* rscale, float* work,
lapack_int *info );
void LAPACK_dggbal( char* job, lapack_int* n, double* a, lapack_int* lda,
double* b, lapack_int* ldb, lapack_int* ilo,
lapack_int* ihi, double* lscale, double* rscale,
double* work, lapack_int *info );
void LAPACK_cggbal( char* job, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale, float* work, lapack_int *info );
void LAPACK_zggbal( char* job, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
lapack_int* ilo, lapack_int* ihi, double* lscale,
double* rscale, double* work, lapack_int *info );
void LAPACK_sggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const float* lscale, const float* rscale,
lapack_int* m, float* v, lapack_int* ldv,
lapack_int *info );
void LAPACK_dggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const double* lscale, const double* rscale,
lapack_int* m, double* v, lapack_int* ldv,
lapack_int *info );
void LAPACK_cggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const float* lscale, const float* rscale,
lapack_int* m, lapack_complex_float* v, lapack_int* ldv,
lapack_int *info );
void LAPACK_zggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,
lapack_int* ihi, const double* lscale, const double* rscale,
lapack_int* m, lapack_complex_double* v, lapack_int* ldv,
lapack_int *info );
void LAPACK_shgeqz( char* job, char* compq, char* compz, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, float* h, lapack_int* ldh,
float* t, lapack_int* ldt, float* alphar, float* alphai,
float* beta, float* q, lapack_int* ldq, float* z,
lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dhgeqz( char* job, char* compq, char* compz, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, double* h,
lapack_int* ldh, double* t, lapack_int* ldt, double* alphar,
double* alphai, double* beta, double* q, lapack_int* ldq,
double* z, lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_chgeqz( char* job, char* compq, char* compz, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, lapack_complex_float* h,
lapack_int* ldh, lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zhgeqz( char* job, char* compq, char* compz, lapack_int* n,
lapack_int* ilo, lapack_int* ihi, lapack_complex_double* h,
lapack_int* ldh, lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
void LAPACK_stgevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, const float* s, lapack_int* lds,
const float* p, lapack_int* ldp, float* vl,
lapack_int* ldvl, float* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, float* work,
lapack_int *info );
void LAPACK_dtgevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, const double* s, lapack_int* lds,
const double* p, lapack_int* ldp, double* vl,
lapack_int* ldvl, double* vr, lapack_int* ldvr,
lapack_int* mm, lapack_int* m, double* work,
lapack_int *info );
void LAPACK_ctgevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_float* s,
lapack_int* lds, const lapack_complex_float* p,
lapack_int* ldp, lapack_complex_float* vl, lapack_int* ldvl,
lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,
lapack_int* m, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_ztgevc( char* side, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_double* s,
lapack_int* lds, const lapack_complex_double* p,
lapack_int* ldp, lapack_complex_double* vl,
lapack_int* ldvl, lapack_complex_double* vr,
lapack_int* ldvr, lapack_int* mm, lapack_int* m,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_stgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* q, lapack_int* ldq, float* z, lapack_int* ldz,
lapack_int* ifst, lapack_int* ilst, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dtgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* q, lapack_int* ldq, double* z, lapack_int* ldz,
lapack_int* ifst, lapack_int* ilst, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_ctgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* z, lapack_int* ldz, lapack_int* ifst,
lapack_int* ilst, lapack_int *info );
void LAPACK_ztgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* z, lapack_int* ldz, lapack_int* ifst,
lapack_int* ilst, lapack_int *info );
void LAPACK_stgsen( lapack_int* ijob, lapack_logical* wantq,
lapack_logical* wantz, const lapack_logical* select,
lapack_int* n, float* a, lapack_int* lda, float* b,
lapack_int* ldb, float* alphar, float* alphai, float* beta,
float* q, lapack_int* ldq, float* z, lapack_int* ldz,
lapack_int* m, float* pl, float* pr, float* dif,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_dtgsen( lapack_int* ijob, lapack_logical* wantq,
lapack_logical* wantz, const lapack_logical* select,
lapack_int* n, double* a, lapack_int* lda, double* b,
lapack_int* ldb, double* alphar, double* alphai,
double* beta, double* q, lapack_int* ldq, double* z,
lapack_int* ldz, lapack_int* m, double* pl, double* pr,
double* dif, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_ctgsen( lapack_int* ijob, lapack_logical* wantq,
lapack_logical* wantz, const lapack_logical* select,
lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* z, lapack_int* ldz, lapack_int* m,
float* pl, float* pr, float* dif,
lapack_complex_float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_ztgsen( lapack_int* ijob, lapack_logical* wantq,
lapack_logical* wantz, const lapack_logical* select,
lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* z, lapack_int* ldz, lapack_int* m,
double* pl, double* pr, double* dif,
lapack_complex_double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_stgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
const float* a, lapack_int* lda, const float* b,
lapack_int* ldb, float* c, lapack_int* ldc, const float* d,
lapack_int* ldd, const float* e, lapack_int* lde, float* f,
lapack_int* ldf, float* scale, float* dif, float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
void LAPACK_dtgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
const double* a, lapack_int* lda, const double* b,
lapack_int* ldb, double* c, lapack_int* ldc,
const double* d, lapack_int* ldd, const double* e,
lapack_int* lde, double* f, lapack_int* ldf, double* scale,
double* dif, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_ctgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda,
const lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* c, lapack_int* ldc,
const lapack_complex_float* d, lapack_int* ldd,
const lapack_complex_float* e, lapack_int* lde,
lapack_complex_float* f, lapack_int* ldf, float* scale,
float* dif, lapack_complex_float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_ztgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda,
const lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* c, lapack_int* ldc,
const lapack_complex_double* d, lapack_int* ldd,
const lapack_complex_double* e, lapack_int* lde,
lapack_complex_double* f, lapack_int* ldf, double* scale,
double* dif, lapack_complex_double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_stgsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const float* a, lapack_int* lda,
const float* b, lapack_int* ldb, const float* vl,
lapack_int* ldvl, const float* vr, lapack_int* ldvr,
float* s, float* dif, lapack_int* mm, lapack_int* m,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int *info );
void LAPACK_dtgsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const double* a, lapack_int* lda,
const double* b, lapack_int* ldb, const double* vl,
lapack_int* ldvl, const double* vr, lapack_int* ldvr,
double* s, double* dif, lapack_int* mm, lapack_int* m,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int *info );
void LAPACK_ctgsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, const lapack_complex_float* b,
lapack_int* ldb, const lapack_complex_float* vl,
lapack_int* ldvl, const lapack_complex_float* vr,
lapack_int* ldvr, float* s, float* dif, lapack_int* mm,
lapack_int* m, lapack_complex_float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
void LAPACK_ztgsna( char* job, char* howmny, const lapack_logical* select,
lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, const lapack_complex_double* b,
lapack_int* ldb, const lapack_complex_double* vl,
lapack_int* ldvl, const lapack_complex_double* vr,
lapack_int* ldvr, double* s, double* dif, lapack_int* mm,
lapack_int* m, lapack_complex_double* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
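/* Generalized singular value decomposition: ggsvp preprocesses the
   matrix pair, tgsja computes the GSVD of the resulting triangular pair */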
void LAPACK_sggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, float* a, lapack_int* lda,
float* b, lapack_int* ldb, float* tola, float* tolb,
lapack_int* k, lapack_int* l, float* u, lapack_int* ldu,
float* v, lapack_int* ldv, float* q, lapack_int* ldq,
lapack_int* iwork, float* tau, float* work,
lapack_int *info );
void LAPACK_dggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, double* a, lapack_int* lda,
double* b, lapack_int* ldb, double* tola, double* tolb,
lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,
double* v, lapack_int* ldv, double* q, lapack_int* ldq,
lapack_int* iwork, double* tau, double* work,
lapack_int *info );
void LAPACK_cggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
float* tola, float* tolb, lapack_int* k, lapack_int* l,
lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* v, lapack_int* ldv,
lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,
float* rwork, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
double* tola, double* tolb, lapack_int* k, lapack_int* l,
lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* v, lapack_int* ldv,
lapack_complex_double* q, lapack_int* ldq,
lapack_int* iwork, double* rwork,
lapack_complex_double* tau, lapack_complex_double* work,
lapack_int *info );
void LAPACK_stgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* tola, float* tolb, float* alpha, float* beta,
float* u, lapack_int* ldu, float* v, lapack_int* ldv,
float* q, lapack_int* ldq, float* work, lapack_int* ncycle,
lapack_int *info );
void LAPACK_dtgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* tola, double* tolb, double* alpha, double* beta,
double* u, lapack_int* ldu, double* v, lapack_int* ldv,
double* q, lapack_int* ldq, double* work,
lapack_int* ncycle, lapack_int *info );
void LAPACK_ctgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* tola,
float* tolb, float* alpha, float* beta,
lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* v, lapack_int* ldv,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* work, lapack_int* ncycle,
lapack_int *info );
void LAPACK_ztgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* tola,
double* tolb, double* alpha, double* beta,
lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* v, lapack_int* ldv,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* work, lapack_int* ncycle,
lapack_int *info );
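/* Linear least squares drivers: gels (QR/LQ), gelsy (complete
   orthogonal factorization), gelss (SVD), gelsd (divide-and-conquer
   SVD); gglse and ggglm solve equality-constrained and Gauss-Markov
   problems */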
void LAPACK_sgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* work, lapack_int* lwork, lapack_int *info );
void LAPACK_cgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
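/* Illustrative sketch (not part of the header; variable names and data
   are hypothetical): these prototypes follow the Fortran calling
   convention, so every argument is passed by pointer and matrices are
   stored column-major. Solving min ||b - A*x|| for a 3-by-2 A with
   LAPACK_dgels, using the usual lwork = -1 workspace query (requires
   <stdlib.h> for malloc/free):

     char trans = 'N';
     lapack_int m = 3, n = 2, nrhs = 1, lda = 3, ldb = 3, lwork = -1, info = 0;
     double a[6] = { 1.0, 1.0, 1.0,  1.0, 2.0, 3.0 };  // columns of A
     double b[3] = { 6.0, 0.0, 0.0 };                  // right-hand side
     double wkopt;
     LAPACK_dgels( &trans, &m, &n, &nrhs, a, &lda, b, &ldb, &wkopt, &lwork,
                   &info );                            // workspace query
     lwork = (lapack_int)wkopt;
     double* work = (double*)malloc( lwork * sizeof(double) );
     LAPACK_dgels( &trans, &m, &n, &nrhs, a, &lda, b, &ldb, work, &lwork,
                   &info );                            // solve; x in b[0..n-1]
     free( work );
*/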
void LAPACK_sgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
lapack_int* lda, float* b, lapack_int* ldb,
lapack_int* jpvt, float* rcond, lapack_int* rank,
float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, double* b, lapack_int* ldb,
lapack_int* jpvt, double* rcond, lapack_int* rank,
double* work, lapack_int* lwork, lapack_int *info );
void LAPACK_cgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, lapack_int* jpvt,
float* rcond, lapack_int* rank, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int *info );
void LAPACK_zgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, lapack_int* jpvt,
double* rcond, lapack_int* rank,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
void LAPACK_sgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
lapack_int* lda, float* b, lapack_int* ldb, float* s,
float* rcond, lapack_int* rank, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* s,
double* rcond, lapack_int* rank, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* s,
float* rcond, lapack_int* rank, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int *info );
void LAPACK_zgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* s,
double* rcond, lapack_int* rank,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
void LAPACK_sgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,
lapack_int* lda, float* b, lapack_int* ldb, float* s,
float* rcond, lapack_int* rank, float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
void LAPACK_dgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* s,
double* rcond, lapack_int* rank, double* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
void LAPACK_cgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* s,
float* rcond, lapack_int* rank, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int* iwork,
lapack_int *info );
void LAPACK_zgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* s,
double* rcond, lapack_int* rank,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* iwork, lapack_int *info );
void LAPACK_sgglse( lapack_int* m, lapack_int* n, lapack_int* p, float* a,
lapack_int* lda, float* b, lapack_int* ldb, float* c,
float* d, float* x, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgglse( lapack_int* m, lapack_int* n, lapack_int* p, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* c,
double* d, double* x, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgglse( lapack_int* m, lapack_int* n, lapack_int* p,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* c, lapack_complex_float* d,
lapack_complex_float* x, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zgglse( lapack_int* m, lapack_int* n, lapack_int* p,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* c, lapack_complex_double* d,
lapack_complex_double* x, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sggglm( lapack_int* n, lapack_int* m, lapack_int* p, float* a,
lapack_int* lda, float* b, lapack_int* ldb, float* d,
float* x, float* y, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dggglm( lapack_int* n, lapack_int* m, lapack_int* p, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* d,
double* x, double* y, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cggglm( lapack_int* n, lapack_int* m, lapack_int* p,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* d, lapack_complex_float* x,
lapack_complex_float* y, lapack_complex_float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_zggglm( lapack_int* n, lapack_int* m, lapack_int* p,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* d, lapack_complex_double* x,
lapack_complex_double* y, lapack_complex_double* work,
lapack_int* lwork, lapack_int *info );
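/* Standard symmetric/Hermitian eigenvalue drivers: syev/heev (QR),
   syevd/heevd (divide and conquer), syevx/heevx (selected eigenvalues),
   syevr/heevr (MRRR) */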
void LAPACK_ssyev( char* jobz, char* uplo, lapack_int* n, float* a,
lapack_int* lda, float* w, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dsyev( char* jobz, char* uplo, lapack_int* n, double* a,
lapack_int* lda, double* w, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cheev( char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* w,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zheev( char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* w,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
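/* Illustrative sketch (hypothetical data, not part of the header):
   computing all eigenvalues and eigenvectors of a 2-by-2 symmetric
   matrix with LAPACK_dsyev. On exit w holds the eigenvalues in
   ascending order and a is overwritten with the orthonormal
   eigenvectors (requires <stdlib.h>):

     char jobz = 'V', uplo = 'U';
     lapack_int n = 2, lda = 2, lwork = -1, info = 0;
     double a[4] = { 2.0, 1.0,  1.0, 2.0 };  // column-major; upper triangle used
     double w[2], wkopt;
     LAPACK_dsyev( &jobz, &uplo, &n, a, &lda, w, &wkopt, &lwork, &info );  // query
     lwork = (lapack_int)wkopt;
     double* work = (double*)malloc( lwork * sizeof(double) );
     LAPACK_dsyev( &jobz, &uplo, &n, a, &lda, w, work, &lwork, &info );
     free( work );
*/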
void LAPACK_ssyevd( char* jobz, char* uplo, lapack_int* n, float* a,
lapack_int* lda, float* w, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dsyevd( char* jobz, char* uplo, lapack_int* n, double* a,
lapack_int* lda, double* w, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_cheevd( char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* w,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zheevd( char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* w,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_ssyevx( char* jobz, char* range, char* uplo, lapack_int* n,
float* a, lapack_int* lda, float* vl, float* vu,
lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, float* z, lapack_int* ldz,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_dsyevx( char* jobz, char* range, char* uplo, lapack_int* n,
double* a, lapack_int* lda, double* vl, double* vu,
lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, double* z, lapack_int* ldz,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_cheevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* vl,
float* vu, lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_zheevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_ssyevr( char* jobz, char* range, char* uplo, lapack_int* n,
float* a, lapack_int* lda, float* vl, float* vu,
lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, float* z, lapack_int* ldz,
lapack_int* isuppz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dsyevr( char* jobz, char* range, char* uplo, lapack_int* n,
double* a, lapack_int* lda, double* vl, double* vu,
lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, double* z, lapack_int* ldz,
lapack_int* isuppz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_cheevr( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* vl,
float* vu, lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_int* isuppz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zheevr( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_int* isuppz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
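/* Packed-storage eigenvalue drivers: spev/hpev, spevd/hpevd, spevx/hpevx */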
void LAPACK_sspev( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
float* z, lapack_int* ldz, float* work, lapack_int *info );
void LAPACK_dspev( char* jobz, char* uplo, lapack_int* n, double* ap, double* w,
double* z, lapack_int* ldz, double* work, lapack_int *info );
void LAPACK_chpev( char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* ap, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work, float* rwork,
lapack_int *info );
void LAPACK_zhpev( char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* ap, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sspevd( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,
float* z, lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dspevd( char* jobz, char* uplo, lapack_int* n, double* ap,
double* w, double* z, lapack_int* ldz, double* work,
lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_chpevd( char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* ap, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int* lrwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_zhpevd( char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* ap, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_sspevx( char* jobz, char* range, char* uplo, lapack_int* n,
float* ap, float* vl, float* vu, lapack_int* il,
lapack_int* iu, float* abstol, lapack_int* m, float* w,
float* z, lapack_int* ldz, float* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_dspevx( char* jobz, char* range, char* uplo, lapack_int* n,
double* ap, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, double* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_chpevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_float* ap, float* vl, float* vu,
lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_zhpevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_complex_double* ap, double* vl, double* vu,
lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
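/* Band eigenvalue drivers: sbev/hbev, sbevd/hbevd, sbevx/hbevx */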
void LAPACK_ssbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
float* ab, lapack_int* ldab, float* w, float* z,
lapack_int* ldz, float* work, lapack_int *info );
void LAPACK_dsbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
double* ab, lapack_int* ldab, double* w, double* z,
lapack_int* ldz, double* work, lapack_int *info );
void LAPACK_chbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_float* ab, lapack_int* ldab, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, float* rwork, lapack_int *info );
void LAPACK_zhbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_double* ab, lapack_int* ldab, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, double* rwork,
lapack_int *info );
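
/* Hedged sketch: dsbev on the 3x3 tridiagonal matrix tridiag(-1, 2, -1)
   stored as a symmetric band with kd = 1 (ldab = kd + 1 rows, column-major,
   ab(kd+1+i-j, j) = A(i,j) for UPLO = 'U'). Assumes the same boilerplate as
   the dspev sketch above. */
#if 0
double ab[6] = { 0.0, 2.0,    /* column 1: unused slot, a11 */
                 -1.0, 2.0,   /* column 2: a12, a22 */
                 -1.0, 2.0 }; /* column 3: a23, a33 */
double w[3], z[9], work[7];   /* dsbev needs max(1, 3*n-2) workspace */
lapack_int n = 3, kd = 1, ldab = 2, ldz = 3, info;
char jobz = 'N', uplo = 'U';
LAPACK_dsbev(&jobz, &uplo, &n, &kd, ab, &ldab, w, z, &ldz, work, &info);
/* w = { 2-sqrt(2), 2, 2+sqrt(2) } on success */
#endif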
void LAPACK_ssbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
float* ab, lapack_int* ldab, float* w, float* z,
lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dsbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
double* ab, lapack_int* ldab, double* w, double* z,
lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_chbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_float* ab, lapack_int* ldab, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zhbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,
lapack_complex_double* ab, lapack_int* ldab, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_ssbevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* kd, float* ab, lapack_int* ldab, float* q,
lapack_int* ldq, float* vl, float* vu, lapack_int* il,
lapack_int* iu, float* abstol, lapack_int* m, float* w,
float* z, lapack_int* ldz, float* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_dsbevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* kd, double* ab, lapack_int* ldab, double* q,
lapack_int* ldq, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, double* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_chbevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* q, lapack_int* ldq, float* vl,
float* vu, lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_zhbevx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* q, lapack_int* ldq, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_sstev( char* jobz, lapack_int* n, float* d, float* e, float* z,
lapack_int* ldz, float* work, lapack_int *info );
void LAPACK_dstev( char* jobz, lapack_int* n, double* d, double* e, double* z,
lapack_int* ldz, double* work, lapack_int *info );
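
/* Hedged sketch: the same tridiagonal matrix via dstev, which takes the
   diagonal d and off-diagonal e directly; d is overwritten with the
   eigenvalues in ascending order. Same boilerplate assumption as above. */
#if 0
double d[3] = { 2.0, 2.0, 2.0 }, e[2] = { -1.0, -1.0 };
double z[9], work[4];         /* 2*n-2 workspace when jobz = 'V' */
lapack_int n = 3, ldz = 3, info;
char jobz = 'V';
LAPACK_dstev(&jobz, &n, d, e, z, &ldz, work, &info);
#endif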
void LAPACK_sstevd( char* jobz, lapack_int* n, float* d, float* e, float* z,
lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_dstevd( char* jobz, lapack_int* n, double* d, double* e, double* z,
lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_sstevx( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w, float* z,
lapack_int* ldz, float* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_dstevx( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, double* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_sstevr( char* jobz, char* range, lapack_int* n, float* d, float* e,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w, float* z,
lapack_int* ldz, lapack_int* isuppz, float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_dstevr( char* jobz, char* range, lapack_int* n, double* d,
double* e, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, lapack_int* isuppz,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_sgees( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
lapack_int* n, float* a, lapack_int* lda, lapack_int* sdim,
float* wr, float* wi, float* vs, lapack_int* ldvs,
float* work, lapack_int* lwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_dgees( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
lapack_int* n, double* a, lapack_int* lda, lapack_int* sdim,
double* wr, double* wi, double* vs, lapack_int* ldvs,
double* work, lapack_int* lwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_cgees( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_int* sdim, lapack_complex_float* w,
lapack_complex_float* vs, lapack_int* ldvs,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_logical* bwork, lapack_int *info );
void LAPACK_zgees( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_int* sdim, lapack_complex_double* w,
lapack_complex_double* vs, lapack_int* ldvs,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_logical* bwork, lapack_int *info );
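
/* Hedged sketch: a real Schur factorization A = Z*T*Z^T with dgees. With
   sort = 'N' the select callback and bwork are not referenced, so a null
   function pointer suffices. Same boilerplate assumption as above. */
#if 0
double a[4] = { 4.0, 2.0, 1.0, 3.0 };   /* column-major [[4,1],[2,3]] */
double wr[2], wi[2], vs[4], work[6];    /* lwork >= 3*n */
lapack_int n = 2, lda = 2, sdim = 0, ldvs = 2, lwork = 6, info;
lapack_logical bwork[1];
char jobvs = 'V', sort = 'N';
LAPACK_dgees(&jobvs, &sort, (LAPACK_D_SELECT2)0, &n, a, &lda, &sdim,
             wr, wi, vs, &ldvs, work, &lwork, bwork, &info);
/* a is overwritten with T, vs holds the orthogonal Z, wr/wi hold 5 and 2 */
#endif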
void LAPACK_sgeesx( char* jobvs, char* sort, LAPACK_S_SELECT2 select,
char* sense, lapack_int* n, float* a, lapack_int* lda,
lapack_int* sdim, float* wr, float* wi, float* vs,
lapack_int* ldvs, float* rconde, float* rcondv, float* work,
lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,
lapack_logical* bwork, lapack_int *info );
void LAPACK_dgeesx( char* jobvs, char* sort, LAPACK_D_SELECT2 select,
char* sense, lapack_int* n, double* a, lapack_int* lda,
lapack_int* sdim, double* wr, double* wi, double* vs,
lapack_int* ldvs, double* rconde, double* rcondv,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_cgeesx( char* jobvs, char* sort, LAPACK_C_SELECT1 select,
char* sense, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* sdim, lapack_complex_float* w,
lapack_complex_float* vs, lapack_int* ldvs, float* rconde,
float* rcondv, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_zgeesx( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,
char* sense, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* sdim, lapack_complex_double* w,
lapack_complex_double* vs, lapack_int* ldvs, double* rconde,
double* rcondv, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_sgeev( char* jobvl, char* jobvr, lapack_int* n, float* a,
lapack_int* lda, float* wr, float* wi, float* vl,
lapack_int* ldvl, float* vr, lapack_int* ldvr, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dgeev( char* jobvl, char* jobvr, lapack_int* n, double* a,
lapack_int* lda, double* wr, double* wi, double* vl,
lapack_int* ldvl, double* vr, lapack_int* ldvr, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cgeev( char* jobvl, char* jobvr, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int* ldvl, lapack_complex_float* vr, lapack_int* ldvr,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zgeev( char* jobvl, char* jobvr, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* w, lapack_complex_double* vl,
lapack_int* ldvl, lapack_complex_double* vr,
lapack_int* ldvr, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int *info );
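
/* Hedged sketch: dgeev with the standard two-call workspace query -- a first
   call with lwork = -1 returns the optimal size in work[0]. Same boilerplate
   assumption as above, plus <stdlib.h> for malloc/free. */
#if 0
double a[4] = { 4.0, 2.0, 1.0, 3.0 };
double wr[2], wi[2], vl[4], vr[4], wkopt, *work;
lapack_int n = 2, lda = 2, ldvl = 2, ldvr = 2, lwork = -1, info;
char jobvl = 'N', jobvr = 'V';
LAPACK_dgeev(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,
             &wkopt, &lwork, &info);            /* workspace query */
lwork = (lapack_int)wkopt;
work = (double*)malloc((size_t)lwork * sizeof(double));
LAPACK_dgeev(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,
             work, &lwork, &info);              /* actual computation */
free(work);
#endif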
void LAPACK_sgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, float* a, lapack_int* lda, float* wr,
float* wi, float* vl, lapack_int* ldvl, float* vr,
lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
float* scale, float* abnrm, float* rconde, float* rcondv,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int *info );
void LAPACK_dgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, double* a, lapack_int* lda, double* wr,
double* wi, double* vl, lapack_int* ldvl, double* vr,
lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
double* scale, double* abnrm, double* rconde,
double* rcondv, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_cgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* w, lapack_complex_float* vl,
lapack_int* ldvl, lapack_complex_float* vr,
lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
float* scale, float* abnrm, float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* w, lapack_complex_double* vl,
lapack_int* ldvl, lapack_complex_double* vr,
lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
double* scale, double* abnrm, double* rconde,
double* rcondv, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int *info );
void LAPACK_sgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
float* a, lapack_int* lda, float* s, float* u,
lapack_int* ldu, float* vt, lapack_int* ldvt, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_dgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
double* a, lapack_int* lda, double* s, double* u,
lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* s,
lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* vt, lapack_int* ldvt,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* s,
lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* vt, lapack_int* ldvt,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
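
/* Hedged sketch: full SVD of a 2x2 matrix with dgesvd. For an m-by-n input,
   lwork must be at least max(3*min(m,n)+max(m,n), 5*min(m,n)), and the input
   matrix is overwritten. Same boilerplate assumption as above. */
#if 0
double a[4] = { 1.0, 0.0, 0.0, 2.0 };  /* column-major diag(1, 2) */
double s[2], u[4], vt[4], work[10];
lapack_int m = 2, n = 2, lda = 2, ldu = 2, ldvt = 2, lwork = 10, info;
char jobu = 'A', jobvt = 'A';
LAPACK_dgesvd(&jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
              work, &lwork, &info);
/* s = { 2, 1 }: singular values in descending order */
#endif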
void LAPACK_sgesdd( char* jobz, lapack_int* m, lapack_int* n, float* a,
lapack_int* lda, float* s, float* u, lapack_int* ldu,
float* vt, lapack_int* ldvt, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_dgesdd( char* jobz, lapack_int* m, lapack_int* n, double* a,
lapack_int* lda, double* s, double* u, lapack_int* ldu,
double* vt, lapack_int* ldvt, double* work,
lapack_int* lwork, lapack_int* iwork, lapack_int *info );
void LAPACK_cgesdd( char* jobz, lapack_int* m, lapack_int* n,
lapack_complex_float* a, lapack_int* lda, float* s,
lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* vt, lapack_int* ldvt,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_zgesdd( char* jobz, lapack_int* m, lapack_int* n,
lapack_complex_double* a, lapack_int* lda, double* s,
lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* vt, lapack_int* ldvt,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* iwork, lapack_int *info );
void LAPACK_dgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
char* jobp, lapack_int* m, lapack_int* n, double* a,
lapack_int* lda, double* sva, double* u, lapack_int* ldu,
double* v, lapack_int* ldv, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_sgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,
char* jobp, lapack_int* m, lapack_int* n, float* a,
lapack_int* lda, float* sva, float* u, lapack_int* ldu,
float* v, lapack_int* ldv, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_dgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
lapack_int* n, double* a, lapack_int* lda, double* sva,
lapack_int* mv, double* v, lapack_int* ldv, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,
lapack_int* n, float* a, lapack_int* lda, float* sva,
lapack_int* mv, float* v, lapack_int* ldv, float* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_sggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* alpha, float* beta, float* u, lapack_int* ldu,
float* v, lapack_int* ldv, float* q, lapack_int* ldq,
float* work, lapack_int* iwork, lapack_int *info );
void LAPACK_dggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* alpha, double* beta, double* u, lapack_int* ldu,
double* v, lapack_int* ldv, double* q, lapack_int* ldq,
double* work, lapack_int* iwork, lapack_int *info );
void LAPACK_cggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* alpha,
float* beta, lapack_complex_float* u, lapack_int* ldu,
lapack_complex_float* v, lapack_int* ldv,
lapack_complex_float* q, lapack_int* ldq,
lapack_complex_float* work, float* rwork, lapack_int* iwork,
lapack_int *info );
void LAPACK_zggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,
lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* alpha,
double* beta, lapack_complex_double* u, lapack_int* ldu,
lapack_complex_double* v, lapack_int* ldv,
lapack_complex_double* q, lapack_int* ldq,
lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int *info );
void LAPACK_ssygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* w, float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dsygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* w, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_chegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* w,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zhegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* w,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
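
/* Hedged sketch: generalized symmetric-definite eigenproblem A*x = lambda*B*x
   (itype = 1) with dsygv; B must be symmetric positive definite. Same
   boilerplate assumption as above. */
#if 0
double a[4] = { 2.0, 1.0, 1.0, 2.0 };  /* symmetric A */
double b[4] = { 1.0, 0.0, 0.0, 1.0 };  /* SPD B (identity here) */
double w[2], work[8];                  /* lwork >= 3*n - 1 */
lapack_int itype = 1, n = 2, lda = 2, ldb = 2, lwork = 8, info;
char jobz = 'V', uplo = 'U';
LAPACK_dsygv(&itype, &jobz, &uplo, &n, a, &lda, b, &ldb, w, work, &lwork,
             &info);
/* w holds the eigenvalues; a holds B-orthonormal eigenvectors */
#endif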
void LAPACK_ssygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
float* w, float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_dsygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
double* w, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_chegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* w,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zhegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* w,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_ssygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, float* a, lapack_int* lda, float* b,
lapack_int* ldb, float* vl, float* vu, lapack_int* il,
lapack_int* iu, float* abstol, lapack_int* m, float* w,
float* z, lapack_int* ldz, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_dsygvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, double* a, lapack_int* lda, double* b,
lapack_int* ldb, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_chegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, float* vl,
float* vu, lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_zhegvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_sspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
float* ap, float* bp, float* w, float* z, lapack_int* ldz,
float* work, lapack_int *info );
void LAPACK_dspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
double* ap, double* bp, double* w, double* z,
lapack_int* ldz, double* work, lapack_int *info );
void LAPACK_chpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* ap, lapack_complex_float* bp, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, float* rwork, lapack_int *info );
void LAPACK_zhpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* ap, lapack_complex_double* bp,
double* w, lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_sspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
float* ap, float* bp, float* w, float* z, lapack_int* ldz,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_dspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
double* ap, double* bp, double* w, double* z,
lapack_int* ldz, double* work, lapack_int* lwork,
lapack_int* iwork, lapack_int* liwork, lapack_int *info );
void LAPACK_chpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_float* ap, lapack_complex_float* bp,
float* w, lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zhpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,
lapack_complex_double* ap, lapack_complex_double* bp,
double* w, lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_sspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, float* ap, float* bp, float* vl, float* vu,
lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, float* z, lapack_int* ldz,
float* work, lapack_int* iwork, lapack_int* ifail,
lapack_int *info );
void LAPACK_dspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, double* ap, double* bp, double* vl,
double* vu, lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, double* z, lapack_int* ldz,
double* work, lapack_int* iwork, lapack_int* ifail,
lapack_int *info );
void LAPACK_chpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, lapack_complex_float* ap,
lapack_complex_float* bp, float* vl, float* vu,
lapack_int* il, lapack_int* iu, float* abstol,
lapack_int* m, float* w, lapack_complex_float* z,
lapack_int* ldz, lapack_complex_float* work, float* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_zhpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,
lapack_int* n, lapack_complex_double* ap,
lapack_complex_double* bp, double* vl, double* vu,
lapack_int* il, lapack_int* iu, double* abstol,
lapack_int* m, double* w, lapack_complex_double* z,
lapack_int* ldz, lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_ssbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, float* ab, lapack_int* ldab, float* bb,
lapack_int* ldbb, float* w, float* z, lapack_int* ldz,
float* work, lapack_int *info );
void LAPACK_dsbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, double* ab, lapack_int* ldab, double* bb,
lapack_int* ldbb, double* w, double* z, lapack_int* ldz,
double* work, lapack_int *info );
void LAPACK_chbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* bb, lapack_int* ldbb, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, float* rwork, lapack_int *info );
void LAPACK_zhbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* bb, lapack_int* ldbb, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, double* rwork,
lapack_int *info );
void LAPACK_ssbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, float* ab, lapack_int* ldab, float* bb,
lapack_int* ldbb, float* w, float* z, lapack_int* ldz,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_dsbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, double* ab, lapack_int* ldab, double* bb,
lapack_int* ldbb, double* w, double* z, lapack_int* ldz,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_chbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,
lapack_complex_float* bb, lapack_int* ldbb, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,
lapack_int *info );
void LAPACK_zhbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,
lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,
lapack_complex_double* bb, lapack_int* ldbb, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork, lapack_int* iwork,
lapack_int* liwork, lapack_int *info );
void LAPACK_ssbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* ka, lapack_int* kb, float* ab, lapack_int* ldab,
float* bb, lapack_int* ldbb, float* q, lapack_int* ldq,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w, float* z,
lapack_int* ldz, float* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_dsbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* ka, lapack_int* kb, double* ab,
lapack_int* ldab, double* bb, lapack_int* ldbb, double* q,
lapack_int* ldq, double* vl, double* vu, lapack_int* il,
lapack_int* iu, double* abstol, lapack_int* m, double* w,
double* z, lapack_int* ldz, double* work, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_chbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* ka, lapack_int* kb, lapack_complex_float* ab,
lapack_int* ldab, lapack_complex_float* bb,
lapack_int* ldbb, lapack_complex_float* q, lapack_int* ldq,
float* vl, float* vu, lapack_int* il, lapack_int* iu,
float* abstol, lapack_int* m, float* w,
lapack_complex_float* z, lapack_int* ldz,
lapack_complex_float* work, float* rwork, lapack_int* iwork,
lapack_int* ifail, lapack_int *info );
void LAPACK_zhbgvx( char* jobz, char* range, char* uplo, lapack_int* n,
lapack_int* ka, lapack_int* kb, lapack_complex_double* ab,
lapack_int* ldab, lapack_complex_double* bb,
lapack_int* ldbb, lapack_complex_double* q, lapack_int* ldq,
double* vl, double* vu, lapack_int* il, lapack_int* iu,
double* abstol, lapack_int* m, double* w,
lapack_complex_double* z, lapack_int* ldz,
lapack_complex_double* work, double* rwork,
lapack_int* iwork, lapack_int* ifail, lapack_int *info );
void LAPACK_sgges( char* jobvsl, char* jobvsr, char* sort,
LAPACK_S_SELECT3 selctg, lapack_int* n, float* a,
lapack_int* lda, float* b, lapack_int* ldb, lapack_int* sdim,
float* alphar, float* alphai, float* beta, float* vsl,
lapack_int* ldvsl, float* vsr, lapack_int* ldvsr,
float* work, lapack_int* lwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_dgges( char* jobvsl, char* jobvsr, char* sort,
LAPACK_D_SELECT3 selctg, lapack_int* n, double* a,
lapack_int* lda, double* b, lapack_int* ldb,
lapack_int* sdim, double* alphar, double* alphai,
double* beta, double* vsl, lapack_int* ldvsl, double* vsr,
lapack_int* ldvsr, double* work, lapack_int* lwork,
lapack_logical* bwork, lapack_int *info );
void LAPACK_cgges( char* jobvsl, char* jobvsr, char* sort,
LAPACK_C_SELECT2 selctg, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* vsl, lapack_int* ldvsl,
lapack_complex_float* vsr, lapack_int* ldvsr,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_logical* bwork, lapack_int *info );
void LAPACK_zgges( char* jobvsl, char* jobvsr, char* sort,
LAPACK_Z_SELECT2 selctg, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int* ldvsl,
lapack_complex_double* vsr, lapack_int* ldvsr,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_logical* bwork, lapack_int *info );
void LAPACK_sggesx( char* jobvsl, char* jobvsr, char* sort,
LAPACK_S_SELECT3 selctg, char* sense, lapack_int* n,
float* a, lapack_int* lda, float* b, lapack_int* ldb,
lapack_int* sdim, float* alphar, float* alphai, float* beta,
float* vsl, lapack_int* ldvsl, float* vsr,
lapack_int* ldvsr, float* rconde, float* rcondv,
float* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_dggesx( char* jobvsl, char* jobvsr, char* sort,
LAPACK_D_SELECT3 selctg, char* sense, lapack_int* n,
double* a, lapack_int* lda, double* b, lapack_int* ldb,
lapack_int* sdim, double* alphar, double* alphai,
double* beta, double* vsl, lapack_int* ldvsl, double* vsr,
lapack_int* ldvsr, double* rconde, double* rcondv,
double* work, lapack_int* lwork, lapack_int* iwork,
lapack_int* liwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_cggesx( char* jobvsl, char* jobvsr, char* sort,
LAPACK_C_SELECT2 selctg, char* sense, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* vsl, lapack_int* ldvsl,
lapack_complex_float* vsr, lapack_int* ldvsr, float* rconde,
float* rcondv, lapack_complex_float* work,
lapack_int* lwork, float* rwork, lapack_int* iwork,
lapack_int* liwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_zggesx( char* jobvsl, char* jobvsr, char* sort,
LAPACK_Z_SELECT2 selctg, char* sense, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* vsl, lapack_int* ldvsl,
lapack_complex_double* vsr, lapack_int* ldvsr,
double* rconde, double* rcondv, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int* iwork,
lapack_int* liwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_sggev( char* jobvl, char* jobvr, lapack_int* n, float* a,
lapack_int* lda, float* b, lapack_int* ldb, float* alphar,
float* alphai, float* beta, float* vl, lapack_int* ldvl,
float* vr, lapack_int* ldvr, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dggev( char* jobvl, char* jobvr, lapack_int* n, double* a,
lapack_int* lda, double* b, lapack_int* ldb, double* alphar,
double* alphai, double* beta, double* vl, lapack_int* ldvl,
double* vr, lapack_int* ldvr, double* work,
lapack_int* lwork, lapack_int *info );
void LAPACK_cggev( char* jobvl, char* jobvr, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* vl, lapack_int* ldvl,
lapack_complex_float* vr, lapack_int* ldvr,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int *info );
void LAPACK_zggev( char* jobvl, char* jobvr, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int* ldvl,
lapack_complex_double* vr, lapack_int* ldvr,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int *info );
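
/* Hedged sketch: generalized eigenproblem A*x = lambda*B*x with dggev.
   Eigenvalue j is (alphar[j] + i*alphai[j]) / beta[j]; beta[j] may be zero
   for an infinite eigenvalue. Same boilerplate assumption as above. */
#if 0
double a[4] = { 1.0, 0.0, 0.0, 2.0 };
double b[4] = { 1.0, 0.0, 0.0, 1.0 };
double alphar[2], alphai[2], beta[2], vl[4], vr[4], work[16]; /* lwork >= 8*n */
lapack_int n = 2, lda = 2, ldb = 2, ldvl = 2, ldvr = 2, lwork = 16, info;
char jobvl = 'N', jobvr = 'V';
LAPACK_dggev(&jobvl, &jobvr, &n, a, &lda, b, &ldb, alphar, alphai, beta,
             vl, &ldvl, vr, &ldvr, work, &lwork, &info);
#endif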
void LAPACK_sggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, float* a, lapack_int* lda, float* b,
lapack_int* ldb, float* alphar, float* alphai, float* beta,
float* vl, lapack_int* ldvl, float* vr, lapack_int* ldvr,
lapack_int* ilo, lapack_int* ihi, float* lscale,
float* rscale, float* abnrm, float* bbnrm, float* rconde,
float* rcondv, float* work, lapack_int* lwork,
lapack_int* iwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_dggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, double* a, lapack_int* lda, double* b,
lapack_int* ldb, double* alphar, double* alphai,
double* beta, double* vl, lapack_int* ldvl, double* vr,
lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,
double* lscale, double* rscale, double* abnrm,
double* bbnrm, double* rconde, double* rcondv, double* work,
lapack_int* lwork, lapack_int* iwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_cggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* vl, lapack_int* ldvl,
lapack_complex_float* vr, lapack_int* ldvr, lapack_int* ilo,
lapack_int* ihi, float* lscale, float* rscale, float* abnrm,
float* bbnrm, float* rconde, float* rcondv,
lapack_complex_float* work, lapack_int* lwork, float* rwork,
lapack_int* iwork, lapack_logical* bwork,
lapack_int *info );
void LAPACK_zggevx( char* balanc, char* jobvl, char* jobvr, char* sense,
lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* vl, lapack_int* ldvl,
lapack_complex_double* vr, lapack_int* ldvr,
lapack_int* ilo, lapack_int* ihi, double* lscale,
double* rscale, double* abnrm, double* bbnrm,
double* rconde, double* rcondv, lapack_complex_double* work,
lapack_int* lwork, double* rwork, lapack_int* iwork,
lapack_logical* bwork, lapack_int *info );
void LAPACK_dsfrk( char* transr, char* uplo, char* trans, lapack_int* n,
lapack_int* k, double* alpha, const double* a,
lapack_int* lda, double* beta, double* c );
void LAPACK_ssfrk( char* transr, char* uplo, char* trans, lapack_int* n,
lapack_int* k, float* alpha, const float* a, lapack_int* lda,
float* beta, float* c );
void LAPACK_zhfrk( char* transr, char* uplo, char* trans, lapack_int* n,
lapack_int* k, double* alpha, const lapack_complex_double* a,
lapack_int* lda, double* beta, lapack_complex_double* c );
void LAPACK_chfrk( char* transr, char* uplo, char* trans, lapack_int* n,
lapack_int* k, float* alpha, const lapack_complex_float* a,
lapack_int* lda, float* beta, lapack_complex_float* c );
void LAPACK_dtfsm( char* transr, char* side, char* uplo, char* trans,
char* diag, lapack_int* m, lapack_int* n, double* alpha,
const double* a, double* b, lapack_int* ldb );
void LAPACK_stfsm( char* transr, char* side, char* uplo, char* trans,
char* diag, lapack_int* m, lapack_int* n, float* alpha,
const float* a, float* b, lapack_int* ldb );
void LAPACK_ztfsm( char* transr, char* side, char* uplo, char* trans,
char* diag, lapack_int* m, lapack_int* n,
lapack_complex_double* alpha, const lapack_complex_double* a,
lapack_complex_double* b, lapack_int* ldb );
void LAPACK_ctfsm( char* transr, char* side, char* uplo, char* trans,
char* diag, lapack_int* m, lapack_int* n,
lapack_complex_float* alpha, const lapack_complex_float* a,
lapack_complex_float* b, lapack_int* ldb );
void LAPACK_dtfttp( char* transr, char* uplo, lapack_int* n, const double* arf,
double* ap, lapack_int *info );
void LAPACK_stfttp( char* transr, char* uplo, lapack_int* n, const float* arf,
float* ap, lapack_int *info );
void LAPACK_ztfttp( char* transr, char* uplo, lapack_int* n,
const lapack_complex_double* arf, lapack_complex_double* ap,
lapack_int *info );
void LAPACK_ctfttp( char* transr, char* uplo, lapack_int* n,
const lapack_complex_float* arf, lapack_complex_float* ap,
lapack_int *info );
void LAPACK_dtfttr( char* transr, char* uplo, lapack_int* n, const double* arf,
double* a, lapack_int* lda, lapack_int *info );
void LAPACK_stfttr( char* transr, char* uplo, lapack_int* n, const float* arf,
float* a, lapack_int* lda, lapack_int *info );
void LAPACK_ztfttr( char* transr, char* uplo, lapack_int* n,
const lapack_complex_double* arf, lapack_complex_double* a,
lapack_int* lda, lapack_int *info );
void LAPACK_ctfttr( char* transr, char* uplo, lapack_int* n,
const lapack_complex_float* arf, lapack_complex_float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_dtpttf( char* transr, char* uplo, lapack_int* n, const double* ap,
double* arf, lapack_int *info );
void LAPACK_stpttf( char* transr, char* uplo, lapack_int* n, const float* ap,
float* arf, lapack_int *info );
void LAPACK_ztpttf( char* transr, char* uplo, lapack_int* n,
const lapack_complex_double* ap, lapack_complex_double* arf,
lapack_int *info );
void LAPACK_ctpttf( char* transr, char* uplo, lapack_int* n,
const lapack_complex_float* ap, lapack_complex_float* arf,
lapack_int *info );
void LAPACK_dtpttr( char* uplo, lapack_int* n, const double* ap, double* a,
lapack_int* lda, lapack_int *info );
void LAPACK_stpttr( char* uplo, lapack_int* n, const float* ap, float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_ztpttr( char* uplo, lapack_int* n, const lapack_complex_double* ap,
lapack_complex_double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_ctpttr( char* uplo, lapack_int* n, const lapack_complex_float* ap,
lapack_complex_float* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dtrttf( char* transr, char* uplo, lapack_int* n, const double* a,
lapack_int* lda, double* arf, lapack_int *info );
void LAPACK_strttf( char* transr, char* uplo, lapack_int* n, const float* a,
lapack_int* lda, float* arf, lapack_int *info );
void LAPACK_ztrttf( char* transr, char* uplo, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* arf, lapack_int *info );
void LAPACK_ctrttf( char* transr, char* uplo, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* arf, lapack_int *info );
void LAPACK_dtrttp( char* uplo, lapack_int* n, const double* a, lapack_int* lda,
double* ap, lapack_int *info );
void LAPACK_strttp( char* uplo, lapack_int* n, const float* a, lapack_int* lda,
float* ap, lapack_int *info );
void LAPACK_ztrttp( char* uplo, lapack_int* n, const lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* ap,
lapack_int *info );
void LAPACK_ctrttp( char* uplo, lapack_int* n, const lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* ap,
lapack_int *info );
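
/* Hedged sketch: the ?trttp/?tpttr/?trttf/?tfttr family converts between
   full, packed, and RFP triangular storage. Here dtrttp packs the upper
   triangle of a full 2x2 into ap. Same boilerplate assumption as above. */
#if 0
double a[4] = { 1.0, 0.0, 2.0, 3.0 };  /* upper triangle: a11=1, a12=2, a22=3 */
double ap[3];
lapack_int n = 2, lda = 2, info;
char uplo = 'U';
LAPACK_dtrttp(&uplo, &n, a, &lda, ap, &info);
/* ap = { 1, 2, 3 }: columns of the upper triangle, packed */
#endif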
void LAPACK_sgeqrfp( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_dgeqrfp( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_cgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_zgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int* lwork,
lapack_int *info );
void LAPACK_clacgv( lapack_int* n, lapack_complex_float* x, lapack_int* incx );
void LAPACK_zlacgv( lapack_int* n, lapack_complex_double* x, lapack_int* incx );
void LAPACK_slarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
float* x );
void LAPACK_dlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
double* x );
void LAPACK_clarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
lapack_complex_float* x );
void LAPACK_zlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,
lapack_complex_double* x );
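
/* Hedged sketch: filling a vector with N(0,1) samples via dlarnv. idist
   selects the distribution (1: uniform(0,1), 2: uniform(-1,1), 3: normal);
   iseed has four entries in [0, 4095] and iseed[3] must be odd. Same
   boilerplate assumption as above. */
#if 0
double x[5];
lapack_int idist = 3, iseed[4] = { 0, 0, 0, 1 }, n = 5;
LAPACK_dlarnv(&idist, iseed, &n, x);
/* iseed is updated in place, so repeated calls continue the stream */
#endif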
void LAPACK_sgeqr2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int *info );
void LAPACK_dgeqr2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int *info );
void LAPACK_cgeqr2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zgeqr2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int *info );
void LAPACK_slacpy( char* uplo, lapack_int* m, lapack_int* n, const float* a,
lapack_int* lda, float* b, lapack_int* ldb );
void LAPACK_dlacpy( char* uplo, lapack_int* m, lapack_int* n, const double* a,
lapack_int* lda, double* b, lapack_int* ldb );
void LAPACK_clacpy( char* uplo, lapack_int* m, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb );
void LAPACK_zlacpy( char* uplo, lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb );
void LAPACK_sgetf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
lapack_int* ipiv, lapack_int *info );
void LAPACK_dgetf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
lapack_int* ipiv, lapack_int *info );
void LAPACK_cgetf2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int* ipiv, lapack_int *info );
void LAPACK_zgetf2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int* ipiv, lapack_int *info );
void LAPACK_slaswp( lapack_int* n, float* a, lapack_int* lda, lapack_int* k1,
lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );
void LAPACK_dlaswp( lapack_int* n, double* a, lapack_int* lda, lapack_int* k1,
lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );
void LAPACK_claswp( lapack_int* n, lapack_complex_float* a, lapack_int* lda,
lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,
lapack_int* incx );
void LAPACK_zlaswp( lapack_int* n, lapack_complex_double* a, lapack_int* lda,
lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,
lapack_int* incx );
float LAPACK_slange( char* norm, lapack_int* m, lapack_int* n, const float* a,
lapack_int* lda, float* work );
double LAPACK_dlange( char* norm, lapack_int* m, lapack_int* n, const double* a,
lapack_int* lda, double* work );
float LAPACK_clange( char* norm, lapack_int* m, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda, float* work );
double LAPACK_zlange( char* norm, lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda, double* work );
float LAPACK_clanhe( char* norm, char* uplo, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda, float* work );
double LAPACK_zlanhe( char* norm, char* uplo, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda, double* work );
float LAPACK_slansy( char* norm, char* uplo, lapack_int* n, const float* a,
lapack_int* lda, float* work );
double LAPACK_dlansy( char* norm, char* uplo, lapack_int* n, const double* a,
lapack_int* lda, double* work );
float LAPACK_clansy( char* norm, char* uplo, lapack_int* n,
const lapack_complex_float* a, lapack_int* lda, float* work );
double LAPACK_zlansy( char* norm, char* uplo, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda, double* work );
float LAPACK_slantr( char* norm, char* uplo, char* diag, lapack_int* m,
lapack_int* n, const float* a, lapack_int* lda, float* work );
double LAPACK_dlantr( char* norm, char* uplo, char* diag, lapack_int* m,
lapack_int* n, const double* a, lapack_int* lda, double* work );
float LAPACK_clantr( char* norm, char* uplo, char* diag, lapack_int* m,
lapack_int* n, const lapack_complex_float* a, lapack_int* lda,
float* work );
double LAPACK_zlantr( char* norm, char* uplo, char* diag, lapack_int* m,
lapack_int* n, const lapack_complex_double* a, lapack_int* lda,
double* work );
float LAPACK_slamch( char* cmach );
double LAPACK_dlamch( char* cmach );
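
/* Hedged sketch: querying machine parameters with dlamch. The cmach argument
   is a single character, e.g. 'E' for relative machine epsilon and 'S' for
   the safe minimum. Same boilerplate assumption as above. */
#if 0
char e = 'E', s = 'S';
double eps   = LAPACK_dlamch(&e);
double sfmin = LAPACK_dlamch(&s);
printf("eps = %g, sfmin = %g\n", eps, sfmin);
#endif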
void LAPACK_sgelq2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* tau, float* work, lapack_int *info );
void LAPACK_dgelq2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* tau, double* work, lapack_int *info );
void LAPACK_cgelq2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* tau,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zgelq2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* tau,
lapack_complex_double* work, lapack_int *info );
void LAPACK_slarfb( char* side, char* trans, char* direct, char* storev,
lapack_int* m, lapack_int* n, lapack_int* k, const float* v,
lapack_int* ldv, const float* t, lapack_int* ldt, float* c,
lapack_int* ldc, float* work, lapack_int* ldwork );
void LAPACK_dlarfb( char* side, char* trans, char* direct, char* storev,
lapack_int* m, lapack_int* n, lapack_int* k,
const double* v, lapack_int* ldv, const double* t,
lapack_int* ldt, double* c, lapack_int* ldc, double* work,
lapack_int* ldwork );
void LAPACK_clarfb( char* side, char* trans, char* direct, char* storev,
lapack_int* m, lapack_int* n, lapack_int* k,
const lapack_complex_float* v, lapack_int* ldv,
const lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int* ldwork );
void LAPACK_zlarfb( char* side, char* trans, char* direct, char* storev,
lapack_int* m, lapack_int* n, lapack_int* k,
const lapack_complex_double* v, lapack_int* ldv,
const lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int* ldwork );
void LAPACK_slarfg( lapack_int* n, float* alpha, float* x, lapack_int* incx,
float* tau );
void LAPACK_dlarfg( lapack_int* n, double* alpha, double* x, lapack_int* incx,
double* tau );
void LAPACK_clarfg( lapack_int* n, lapack_complex_float* alpha,
lapack_complex_float* x, lapack_int* incx,
lapack_complex_float* tau );
void LAPACK_zlarfg( lapack_int* n, lapack_complex_double* alpha,
lapack_complex_double* x, lapack_int* incx,
lapack_complex_double* tau );
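
/* Hedged sketch: dlarfg builds an elementary Householder reflector
   H = I - tau*v*v^T with v(1) = 1 such that H*(alpha, x)^T = (beta, 0)^T.
   On exit alpha is overwritten with beta and x with v(2:n). Same boilerplate
   assumption as above. */
#if 0
double alpha = 3.0, x[2] = { 4.0, 0.0 }, tau;
lapack_int n = 3, incx = 1;
LAPACK_dlarfg(&n, &alpha, x, &incx, &tau);
/* beta = -5 here: ||(3,4,0)|| = 5, and beta takes the opposite sign of alpha */
#endif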
void LAPACK_slarft( char* direct, char* storev, lapack_int* n, lapack_int* k,
const float* v, lapack_int* ldv, const float* tau, float* t,
lapack_int* ldt );
void LAPACK_dlarft( char* direct, char* storev, lapack_int* n, lapack_int* k,
const double* v, lapack_int* ldv, const double* tau,
double* t, lapack_int* ldt );
void LAPACK_clarft( char* direct, char* storev, lapack_int* n, lapack_int* k,
const lapack_complex_float* v, lapack_int* ldv,
const lapack_complex_float* tau, lapack_complex_float* t,
lapack_int* ldt );
void LAPACK_zlarft( char* direct, char* storev, lapack_int* n, lapack_int* k,
const lapack_complex_double* v, lapack_int* ldv,
const lapack_complex_double* tau, lapack_complex_double* t,
lapack_int* ldt );
void LAPACK_slarfx( char* side, lapack_int* m, lapack_int* n, const float* v,
float* tau, float* c, lapack_int* ldc, float* work );
void LAPACK_dlarfx( char* side, lapack_int* m, lapack_int* n, const double* v,
double* tau, double* c, lapack_int* ldc, double* work );
void LAPACK_clarfx( char* side, lapack_int* m, lapack_int* n,
const lapack_complex_float* v, lapack_complex_float* tau,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work );
void LAPACK_zlarfx( char* side, lapack_int* m, lapack_int* n,
const lapack_complex_double* v, lapack_complex_double* tau,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work );
void LAPACK_slatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,
char* sym, float* d, lapack_int* mode, float* cond,
float* dmax, lapack_int* kl, lapack_int* ku, char* pack,
float* a, lapack_int* lda, float* work, lapack_int *info );
void LAPACK_dlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,
char* sym, double* d, lapack_int* mode, double* cond,
double* dmax, lapack_int* kl, lapack_int* ku, char* pack,
double* a, lapack_int* lda, double* work,
lapack_int *info );
void LAPACK_clatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,
char* sym, float* d, lapack_int* mode, float* cond,
float* dmax, lapack_int* kl, lapack_int* ku, char* pack,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,
char* sym, double* d, lapack_int* mode, double* cond,
double* dmax, lapack_int* kl, lapack_int* ku, char* pack,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* work, lapack_int *info );
void LAPACK_slag2d( lapack_int* m, lapack_int* n, const float* sa,
lapack_int* ldsa, double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dlag2s( lapack_int* m, lapack_int* n, const double* a,
lapack_int* lda, float* sa, lapack_int* ldsa,
lapack_int *info );
void LAPACK_clag2z( lapack_int* m, lapack_int* n,
const lapack_complex_float* sa, lapack_int* ldsa,
lapack_complex_double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_zlag2c( lapack_int* m, lapack_int* n,
const lapack_complex_double* a, lapack_int* lda,
lapack_complex_float* sa, lapack_int* ldsa,
lapack_int *info );
void LAPACK_slauum( char* uplo, lapack_int* n, float* a, lapack_int* lda,
lapack_int *info );
void LAPACK_dlauum( char* uplo, lapack_int* n, double* a, lapack_int* lda,
lapack_int *info );
void LAPACK_clauum( char* uplo, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_int *info );
void LAPACK_zlauum( char* uplo, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_int *info );
void LAPACK_slagge( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const float* d, float* a, lapack_int* lda,
lapack_int* iseed, float* work, lapack_int *info );
void LAPACK_dlagge( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const double* d, double* a, lapack_int* lda,
lapack_int* iseed, double* work, lapack_int *info );
void LAPACK_clagge( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const float* d, lapack_complex_float* a,
lapack_int* lda, lapack_int* iseed,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zlagge( lapack_int* m, lapack_int* n, lapack_int* kl,
lapack_int* ku, const double* d, lapack_complex_double* a,
lapack_int* lda, lapack_int* iseed,
lapack_complex_double* work, lapack_int *info );
void LAPACK_slaset( char* uplo, lapack_int* m, lapack_int* n, float* alpha,
float* beta, float* a, lapack_int* lda );
void LAPACK_dlaset( char* uplo, lapack_int* m, lapack_int* n, double* alpha,
double* beta, double* a, lapack_int* lda );
void LAPACK_claset( char* uplo, lapack_int* m, lapack_int* n,
lapack_complex_float* alpha, lapack_complex_float* beta,
lapack_complex_float* a, lapack_int* lda );
void LAPACK_zlaset( char* uplo, lapack_int* m, lapack_int* n,
lapack_complex_double* alpha, lapack_complex_double* beta,
lapack_complex_double* a, lapack_int* lda );
void LAPACK_slasrt( char* id, lapack_int* n, float* d, lapack_int *info );
void LAPACK_dlasrt( char* id, lapack_int* n, double* d, lapack_int *info );
void LAPACK_claghe( lapack_int* n, lapack_int* k, const float* d,
lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zlaghe( lapack_int* n, lapack_int* k, const double* d,
lapack_complex_double* a, lapack_int* lda,
lapack_int* iseed, lapack_complex_double* work,
lapack_int *info );
void LAPACK_slagsy( lapack_int* n, lapack_int* k, const float* d, float* a,
lapack_int* lda, lapack_int* iseed, float* work,
lapack_int *info );
void LAPACK_dlagsy( lapack_int* n, lapack_int* k, const double* d, double* a,
lapack_int* lda, lapack_int* iseed, double* work,
lapack_int *info );
void LAPACK_clagsy( lapack_int* n, lapack_int* k, const float* d,
lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zlagsy( lapack_int* n, lapack_int* k, const double* d,
lapack_complex_double* a, lapack_int* lda,
lapack_int* iseed, lapack_complex_double* work,
lapack_int *info );
void LAPACK_slapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,
float* x, lapack_int* ldx, lapack_int* k );
void LAPACK_dlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,
double* x, lapack_int* ldx, lapack_int* k );
void LAPACK_clapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,
lapack_complex_float* x, lapack_int* ldx, lapack_int* k );
void LAPACK_zlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,
lapack_complex_double* x, lapack_int* ldx, lapack_int* k );
float LAPACK_slapy2( float* x, float* y );
double LAPACK_dlapy2( double* x, double* y );
float LAPACK_slapy3( float* x, float* y, float* z );
double LAPACK_dlapy3( double* x, double* y, double* z );
void LAPACK_slartgp( float* f, float* g, float* cs, float* sn, float* r );
void LAPACK_dlartgp( double* f, double* g, double* cs, double* sn, double* r );
void LAPACK_slartgs( float* x, float* y, float* sigma, float* cs, float* sn );
void LAPACK_dlartgs( double* x, double* y, double* sigma, double* cs,
double* sn );
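
/* Hedged sketch: dlartgp generates a plane rotation with nonnegative r such
   that [cs sn; -sn cs] * (f, g)^T = (r, 0)^T. Same boilerplate assumption as
   above. */
#if 0
double f = 3.0, g = 4.0, cs, sn, r;
LAPACK_dlartgp(&f, &g, &cs, &sn, &r);
/* cs = 0.6, sn = 0.8, r = 5 */
#endif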
// LAPACK 3.3.0
void LAPACK_cbbcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
lapack_int* m, lapack_int* p, lapack_int* q,
float* theta, float* phi,
lapack_complex_float* u1, lapack_int* ldu1,
lapack_complex_float* u2, lapack_int* ldu2,
lapack_complex_float* v1t, lapack_int* ldv1t,
lapack_complex_float* v2t, lapack_int* ldv2t,
float* b11d, float* b11e, float* b12d,
float* b12e, float* b21d, float* b21e,
float* b22d, float* b22e, float* rwork,
                    lapack_int* lrwork, lapack_int *info );
void LAPACK_cheswapr( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* i1,
lapack_int* i2 );
void LAPACK_chetri2( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv,
                     lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_chetri2x( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv,
                      lapack_complex_float* work, lapack_int* nb, lapack_int *info );
void LAPACK_chetrs2( char* uplo, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
                     lapack_complex_float* work, lapack_int *info );
void LAPACK_csyconv( char* uplo, char* way,
lapack_int* n, lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv,
                     lapack_complex_float* work, lapack_int *info );
void LAPACK_csyswapr( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* i1,
lapack_int* i2 );
void LAPACK_csytri2( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv,
                     lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_csytri2x( char* uplo, lapack_int* n,
lapack_complex_float* a, lapack_int* lda,
const lapack_int* ipiv,
                      lapack_complex_float* work, lapack_int* nb, lapack_int *info );
void LAPACK_csytrs2( char* uplo, lapack_int* n,
lapack_int* nrhs, const lapack_complex_float* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_float* b, lapack_int* ldb,
                     lapack_complex_float* work, lapack_int *info );
void LAPACK_cunbdb( char* trans, char* signs,
lapack_int* m, lapack_int* p, lapack_int* q,
lapack_complex_float* x11, lapack_int* ldx11,
lapack_complex_float* x12, lapack_int* ldx12,
lapack_complex_float* x21, lapack_int* ldx21,
lapack_complex_float* x22, lapack_int* ldx22,
float* theta, float* phi,
lapack_complex_float* taup1,
lapack_complex_float* taup2,
lapack_complex_float* tauq1,
lapack_complex_float* tauq2,
                    lapack_complex_float* work, lapack_int* lwork, lapack_int *info );
void LAPACK_cuncsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
char* signs, lapack_int* m, lapack_int* p,
lapack_int* q, lapack_complex_float* x11,
lapack_int* ldx11, lapack_complex_float* x12,
lapack_int* ldx12, lapack_complex_float* x21,
lapack_int* ldx21, lapack_complex_float* x22,
lapack_int* ldx22, float* theta,
lapack_complex_float* u1, lapack_int* ldu1,
lapack_complex_float* u2, lapack_int* ldu2,
lapack_complex_float* v1t, lapack_int* ldv1t,
lapack_complex_float* v2t, lapack_int* ldv2t,
lapack_complex_float* work, lapack_int* lwork,
float* rwork, lapack_int* lrwork,
                    lapack_int* iwork, lapack_int *info );
void LAPACK_dbbcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
lapack_int* m, lapack_int* p, lapack_int* q,
double* theta, double* phi, double* u1,
lapack_int* ldu1, double* u2, lapack_int* ldu2,
double* v1t, lapack_int* ldv1t, double* v2t,
lapack_int* ldv2t, double* b11d, double* b11e,
double* b12d, double* b12e, double* b21d,
double* b21e, double* b22d, double* b22e,
                    double* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dorbdb( char* trans, char* signs,
lapack_int* m, lapack_int* p, lapack_int* q,
double* x11, lapack_int* ldx11, double* x12,
lapack_int* ldx12, double* x21, lapack_int* ldx21,
double* x22, lapack_int* ldx22, double* theta,
double* phi, double* taup1, double* taup2,
double* tauq1, double* tauq2, double* work,
                    lapack_int* lwork, lapack_int *info );
void LAPACK_dorcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
char* signs, lapack_int* m, lapack_int* p,
lapack_int* q, double* x11, lapack_int* ldx11,
double* x12, lapack_int* ldx12, double* x21,
lapack_int* ldx21, double* x22, lapack_int* ldx22,
double* theta, double* u1, lapack_int* ldu1,
double* u2, lapack_int* ldu2, double* v1t,
lapack_int* ldv1t, double* v2t, lapack_int* ldv2t,
double* work, lapack_int* lwork,
                    lapack_int* iwork, lapack_int *info );
void LAPACK_dsyconv( char* uplo, char* way,
lapack_int* n, double* a, lapack_int* lda,
                     const lapack_int* ipiv, double* work, lapack_int *info );
void LAPACK_dsyswapr( char* uplo, lapack_int* n,
double* a, lapack_int* i1, lapack_int* i2 );
void LAPACK_dsytri2( char* uplo, lapack_int* n,
double* a, lapack_int* lda,
const lapack_int* ipiv,
                     lapack_complex_double* work, lapack_int* lwork, lapack_int *info );
void LAPACK_dsytri2x( char* uplo, lapack_int* n,
double* a, lapack_int* lda,
const lapack_int* ipiv, double* work,
                      lapack_int* nb, lapack_int *info );
void LAPACK_dsytrs2( char* uplo, lapack_int* n,
lapack_int* nrhs, const double* a,
lapack_int* lda, const lapack_int* ipiv,
                     double* b, lapack_int* ldb, double* work, lapack_int *info );
void LAPACK_sbbcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
lapack_int* m, lapack_int* p, lapack_int* q,
float* theta, float* phi, float* u1,
lapack_int* ldu1, float* u2, lapack_int* ldu2,
float* v1t, lapack_int* ldv1t, float* v2t,
lapack_int* ldv2t, float* b11d, float* b11e,
float* b12d, float* b12e, float* b21d,
float* b21e, float* b22d, float* b22e,
float* work, lapack_int* lwork , lapack_int *info );
void LAPACK_sorbdb( char* trans, char* signs,
lapack_int* m, lapack_int* p, lapack_int* q,
float* x11, lapack_int* ldx11, float* x12,
lapack_int* ldx12, float* x21, lapack_int* ldx21,
float* x22, lapack_int* ldx22, float* theta,
float* phi, float* taup1, float* taup2,
float* tauq1, float* tauq2, float* work,
lapack_int* lwork , lapack_int *info );
void LAPACK_sorcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
char* signs, lapack_int* m, lapack_int* p,
lapack_int* q, float* x11, lapack_int* ldx11,
float* x12, lapack_int* ldx12, float* x21,
lapack_int* ldx21, float* x22, lapack_int* ldx22,
float* theta, float* u1, lapack_int* ldu1,
float* u2, lapack_int* ldu2, float* v1t,
lapack_int* ldv1t, float* v2t, lapack_int* ldv2t,
float* work, lapack_int* lwork,
lapack_int* iwork , lapack_int *info );
void LAPACK_ssyconv( char* uplo, char* way,
lapack_int* n, float* a, lapack_int* lda,
const lapack_int* ipiv, float* work , lapack_int *info );
void LAPACK_ssyswapr( char* uplo, lapack_int* n,
float* a, lapack_int* i1, lapack_int* i2 );
void LAPACK_ssytri2( char* uplo, lapack_int* n,
float* a, lapack_int* lda,
const lapack_int* ipiv,
                     float* work, lapack_int* lwork , lapack_int *info );
void LAPACK_ssytri2x( char* uplo, lapack_int* n,
float* a, lapack_int* lda,
const lapack_int* ipiv, float* work,
lapack_int* nb , lapack_int *info );
void LAPACK_ssytrs2( char* uplo, lapack_int* n,
lapack_int* nrhs, const float* a,
lapack_int* lda, const lapack_int* ipiv,
float* b, lapack_int* ldb, float* work , lapack_int *info );
void LAPACK_zbbcsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
lapack_int* m, lapack_int* p, lapack_int* q,
double* theta, double* phi,
lapack_complex_double* u1, lapack_int* ldu1,
lapack_complex_double* u2, lapack_int* ldu2,
lapack_complex_double* v1t, lapack_int* ldv1t,
lapack_complex_double* v2t, lapack_int* ldv2t,
double* b11d, double* b11e, double* b12d,
double* b12e, double* b21d, double* b21e,
double* b22d, double* b22e, double* rwork,
lapack_int* lrwork , lapack_int *info );
void LAPACK_zheswapr( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* i1,
lapack_int* i2 );
void LAPACK_zhetri2( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int* lwork , lapack_int *info );
void LAPACK_zhetri2x( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int* nb , lapack_int *info );
void LAPACK_zhetrs2( char* uplo, lapack_int* n,
lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work , lapack_int *info );
void LAPACK_zsyconv( char* uplo, char* way,
lapack_int* n, lapack_complex_double* a,
lapack_int* lda, const lapack_int* ipiv,
lapack_complex_double* work , lapack_int *info );
void LAPACK_zsyswapr( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* i1,
lapack_int* i2 );
void LAPACK_zsytri2( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int* lwork , lapack_int *info );
void LAPACK_zsytri2x( char* uplo, lapack_int* n,
lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* work, lapack_int* nb , lapack_int *info );
void LAPACK_zsytrs2( char* uplo, lapack_int* n,
lapack_int* nrhs,
const lapack_complex_double* a, lapack_int* lda,
const lapack_int* ipiv,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work , lapack_int *info );
void LAPACK_zunbdb( char* trans, char* signs,
lapack_int* m, lapack_int* p, lapack_int* q,
lapack_complex_double* x11, lapack_int* ldx11,
lapack_complex_double* x12, lapack_int* ldx12,
lapack_complex_double* x21, lapack_int* ldx21,
lapack_complex_double* x22, lapack_int* ldx22,
double* theta, double* phi,
lapack_complex_double* taup1,
lapack_complex_double* taup2,
lapack_complex_double* tauq1,
lapack_complex_double* tauq2,
lapack_complex_double* work, lapack_int* lwork , lapack_int *info );
void LAPACK_zuncsd( char* jobu1, char* jobu2,
char* jobv1t, char* jobv2t, char* trans,
char* signs, lapack_int* m, lapack_int* p,
lapack_int* q, lapack_complex_double* x11,
lapack_int* ldx11, lapack_complex_double* x12,
lapack_int* ldx12, lapack_complex_double* x21,
lapack_int* ldx21, lapack_complex_double* x22,
lapack_int* ldx22, double* theta,
lapack_complex_double* u1, lapack_int* ldu1,
lapack_complex_double* u2, lapack_int* ldu2,
lapack_complex_double* v1t, lapack_int* ldv1t,
lapack_complex_double* v2t, lapack_int* ldv2t,
lapack_complex_double* work, lapack_int* lwork,
double* rwork, lapack_int* lrwork,
lapack_int* iwork , lapack_int *info );
// LAPACK 3.4.0
void LAPACK_sgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* nb, const float* v,
lapack_int* ldv, const float* t, lapack_int* ldt, float* c,
lapack_int* ldc, float* work, lapack_int *info );
void LAPACK_dgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* nb, const double* v,
lapack_int* ldv, const double* t, lapack_int* ldt,
double* c, lapack_int* ldc, double* work,
lapack_int *info );
void LAPACK_cgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* nb,
const lapack_complex_float* v, lapack_int* ldv,
const lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* c, lapack_int* ldc,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* nb,
const lapack_complex_double* v, lapack_int* ldv,
const lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* c, lapack_int* ldc,
lapack_complex_double* work, lapack_int *info );
void LAPACK_sgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, float* a,
lapack_int* lda, float* t, lapack_int* ldt, float* work,
lapack_int *info );
void LAPACK_dgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, double* a,
lapack_int* lda, double* t, lapack_int* ldt, double* work,
lapack_int *info );
void LAPACK_cgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* work, lapack_int *info );
void LAPACK_zgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* work, lapack_int *info );
void LAPACK_sgeqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* t, lapack_int* ldt, lapack_int *info );
void LAPACK_dgeqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* t, lapack_int* ldt, lapack_int *info );
void LAPACK_cgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_zgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_sgeqrt3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* t, lapack_int* ldt, lapack_int *info );
void LAPACK_dgeqrt3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* t, lapack_int* ldt, lapack_int *info );
void LAPACK_cgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_zgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_stpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, lapack_int* nb,
const float* v, lapack_int* ldv, const float* t,
lapack_int* ldt, float* a, lapack_int* lda, float* b,
lapack_int* ldb, float* work, lapack_int *info );
void LAPACK_dtpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, lapack_int* nb,
const double* v, lapack_int* ldv, const double* t,
lapack_int* ldt, double* a, lapack_int* lda, double* b,
lapack_int* ldb, double* work, lapack_int *info );
void LAPACK_ctpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, lapack_int* nb,
const lapack_complex_float* v, lapack_int* ldv,
const lapack_complex_float* t, lapack_int* ldt,
lapack_complex_float* a, lapack_int* lda,
lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* work, lapack_int *info );
void LAPACK_ztpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,
lapack_int* k, lapack_int* l, lapack_int* nb,
const lapack_complex_double* v, lapack_int* ldv,
const lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* work, lapack_int *info );
void LAPACK_stpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,
                    float* a, lapack_int* lda, float* b, lapack_int* ldb,
                    float* t, lapack_int* ldt, float* work,
                    lapack_int *info );
void LAPACK_dtpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,
                    double* a, lapack_int* lda, double* b, lapack_int* ldb,
                    double* t, lapack_int* ldt, double* work,
                    lapack_int *info );
void LAPACK_ctpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,
                    lapack_complex_float* a, lapack_int* lda,
                    lapack_complex_float* b, lapack_int* ldb,
                    lapack_complex_float* t, lapack_int* ldt,
                    lapack_complex_float* work, lapack_int *info );
void LAPACK_ztpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,
lapack_complex_double* a, lapack_int* lda,
lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* t, lapack_int* ldt,
lapack_complex_double* work, lapack_int *info );
void LAPACK_stpqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,
float* b, lapack_int* ldb, float* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_dtpqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,
double* b, lapack_int* ldb, double* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_ctpqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a,
lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,
lapack_complex_float* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_ztpqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a,
lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,
lapack_complex_double* t, lapack_int* ldt,
lapack_int *info );
void LAPACK_stprfb( char* side, char* trans, char* direct, char* storev,
                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,
                    const float* v, lapack_int* ldv, const float* t,
                    lapack_int* ldt, float* a, lapack_int* lda, float* b,
                    lapack_int* ldb, float* work,
                    lapack_int* ldwork );
void LAPACK_dtprfb( char* side, char* trans, char* direct, char* storev,
                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,
                    const double* v, lapack_int* ldv, const double* t,
                    lapack_int* ldt, double* a, lapack_int* lda, double* b,
                    lapack_int* ldb, double* work,
                    lapack_int* ldwork );
void LAPACK_ctprfb( char* side, char* trans, char* direct, char* storev,
                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,
                    const lapack_complex_float* v, lapack_int* ldv,
                    const lapack_complex_float* t, lapack_int* ldt,
                    lapack_complex_float* a, lapack_int* lda,
                    lapack_complex_float* b, lapack_int* ldb,
                    lapack_complex_float* work, lapack_int* ldwork );
void LAPACK_ztprfb( char* side, char* trans, char* direct, char* storev,
                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,
                    const lapack_complex_double* v, lapack_int* ldv,
                    const lapack_complex_double* t, lapack_int* ldt,
                    lapack_complex_double* a, lapack_int* lda,
                    lapack_complex_double* b, lapack_int* ldb,
                    lapack_complex_double* work, lapack_int* ldwork );
// LAPACK 3.X.X
void LAPACK_csyr( char* uplo, lapack_int* n, lapack_complex_float* alpha,
const lapack_complex_float* x, lapack_int* incx,
lapack_complex_float* a, lapack_int* lda );
void LAPACK_zsyr( char* uplo, lapack_int* n, lapack_complex_double* alpha,
const lapack_complex_double* x, lapack_int* incx,
lapack_complex_double* a, lapack_int* lda );
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _LAPACKE_H_ */
#endif /* _MKL_LAPACKE_H_ */
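// Usage sketch (illustrative only; not part of the reference header):
// every prototype above follows the raw Fortran calling convention --
// scalars are passed by address, matrices are column-major, and errors
// come back through the trailing info argument.  A minimal call to
// LAPACK_dgeqrt2 (QR factorization with a compact-WY block reflector)
// could look like this:
//
//   lapack_int m = 3, n = 2, lda = 3, ldt = 2, info = 0;
//   double a[6] = {1, 2, 3,  4, 5, 6};   // 3x2 matrix, column-major
//   double t[4];                         // n-by-n triangular factor T
//   LAPACK_dgeqrt2(&m, &n, a, &lda, t, &ldt, &info);
//   if (info < 0) { /* argument -info had an illegal value */ }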
| 1,058,368 | 63.962497 | 89 |
h
|
abess
|
abess-master/python/include/Eigen/src/misc/lapacke_mangling.h
|
#ifndef LAPACK_HEADER_INCLUDED
#define LAPACK_HEADER_INCLUDED
#ifndef LAPACK_GLOBAL
#if defined(LAPACK_GLOBAL_PATTERN_LC) || defined(ADD_)
#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_
#elif defined(LAPACK_GLOBAL_PATTERN_UC) || defined(UPPER)
#define LAPACK_GLOBAL(lcname,UCNAME) UCNAME
#elif defined(LAPACK_GLOBAL_PATTERN_MC) || defined(NOCHANGE)
#define LAPACK_GLOBAL(lcname,UCNAME) lcname
#else
#define LAPACK_GLOBAL(lcname,UCNAME) lcname##_
#endif
#endif
#endif
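// Usage sketch (assumed consumer; not part of this header): lapacke-style
// headers use LAPACK_GLOBAL to map each C name onto the symbol emitted by
// the Fortran compiler, e.g.
//
//   #define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)
//
// which expands to dgeqrt_ under the default/ADD_ scheme, DGEQRT under
// UPPER, and dgeqrt under NOCHANGE.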
| 474 | 25.388889 | 60 |
h
|
abess
|
abess-master/python/include/Eigen/src/plugins/ArrayCwiseBinaryOps.h
|
/** \returns an expression of the coefficient-wise product of \c *this and \a other
*
* \sa MatrixBase::cwiseProduct
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise quotient of \c *this and \a other
*
* \sa MatrixBase::cwiseQuotient
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>
operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise min of \c *this and \a other
*
* Example: \include Cwise_min.cpp
* Output: \verbinclude Cwise_min.out
*
* \sa max()
*/
EIGEN_MAKE_CWISE_BINARY_OP(min,min)
/** \returns an expression of the coefficient-wise min of \c *this and scalar \a other
*
* \sa max()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived,
const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
#ifdef EIGEN_PARSED_BY_DOXYGEN
min
#else
(min)
#endif
(const Scalar &other) const
{
return (min)(Derived::PlainObject::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of \c *this and \a other
*
* Example: \include Cwise_max.cpp
* Output: \verbinclude Cwise_max.out
*
* \sa min()
*/
EIGEN_MAKE_CWISE_BINARY_OP(max,max)
/** \returns an expression of the coefficient-wise max of \c *this and scalar \a other
*
* \sa min()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived,
const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
#ifdef EIGEN_PARSED_BY_DOXYGEN
max
#else
(max)
#endif
(const Scalar &other) const
{
return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
}
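// Usage sketch (illustrative; not part of the official documentation):
// the array/array and array/scalar overloads above compose naturally,
// e.g. to clamp coefficients into an interval:
//
//   Eigen::ArrayXf a(3), b(3);
//   a << 1, 5, 3;  b << 4, 2, 6;
//   Eigen::ArrayXf lo      = a.min(b);             // {1, 2, 3}
//   Eigen::ArrayXf clamped = a.min(4.f).max(2.f);  // clamp to [2, 4] -> {2, 4, 3}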
/** \returns an expression of the coefficient-wise power of \c *this to the given array of \a exponents.
*
* This function computes the coefficient-wise power.
*
* Example: \include Cwise_array_power_array.cpp
* Output: \verbinclude Cwise_array_power_array.out
*/
EIGEN_MAKE_CWISE_BINARY_OP(pow,pow)
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(pow,pow)
#else
/** \returns an expression of the coefficients of \c *this raised to the constant power \a exponent
*
* \tparam T is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression.
*
* This function computes the coefficient-wise power. The function MatrixBase::pow() in the
* unsupported module MatrixFunctions computes the matrix power.
*
* Example: \include Cwise_pow.cpp
* Output: \verbinclude Cwise_pow.out
*
* \sa ArrayBase::pow(ArrayBase), square(), cube(), exp(), log()
*/
template<typename T>
const CwiseBinaryOp<internal::scalar_pow_op<Scalar,T>,Derived,Constant<T> > pow(const T& exponent) const;
#endif
// TODO code generating macros could be moved to Macros.h and could include generation of documentation
#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
template<typename OtherDerived> \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
}\
typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
OP(const Scalar& s) const { \
return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
} \
EIGEN_DEVICE_FUNC friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
OP(const Scalar& s, const Derived& d) { \
return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
}
#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
template<typename OtherDerived> \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
} \
EIGEN_DEVICE_FUNC \
inline const RCmp ## RCOMPARATOR ## ReturnType \
OP(const Scalar& s) const { \
return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
} \
friend inline const Cmp ## RCOMPARATOR ## ReturnType \
OP(const Scalar& s, const Derived& d) { \
return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
}
/** \returns an expression of the coefficient-wise \< operator of *this and \a other
*
* Example: \include Cwise_less.cpp
* Output: \verbinclude Cwise_less.out
*
* \sa all(), any(), operator>(), operator<=()
*/
EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)
/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
*
* Example: \include Cwise_less_equal.cpp
* Output: \verbinclude Cwise_less_equal.out
*
* \sa all(), any(), operator>=(), operator<()
*/
EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)
/** \returns an expression of the coefficient-wise \> operator of *this and \a other
*
* Example: \include Cwise_greater.cpp
* Output: \verbinclude Cwise_greater.out
*
* \sa all(), any(), operator>=(), operator<()
*/
EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)
/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
*
* Example: \include Cwise_greater_equal.cpp
* Output: \verbinclude Cwise_greater_equal.out
*
* \sa all(), any(), operator>(), operator<=()
*/
EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)
/** \returns an expression of the coefficient-wise == operator of *this and \a other
*
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
* isMuchSmallerThan().
*
* Example: \include Cwise_equal_equal.cpp
* Output: \verbinclude Cwise_equal_equal.out
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)
/** \returns an expression of the coefficient-wise != operator of *this and \a other
*
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
* isMuchSmallerThan().
*
* Example: \include Cwise_not_equal.cpp
* Output: \verbinclude Cwise_not_equal.out
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
#undef EIGEN_MAKE_CWISE_COMP_OP
#undef EIGEN_MAKE_CWISE_COMP_R_OP
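// Usage sketch (illustrative): the comparison operators generated above
// yield boolean array expressions that combine with all(), any() and
// select(), e.g.
//
//   Eigen::ArrayXd x(3);  x << -1.0, 0.5, 2.0;
//   bool anyNegative = (x < 0.0).any();             // true
//   Eigen::ArrayXd y  = (x > 0.0).select(x, 0.0);   // {0, 0.5, 2}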
// scalar addition
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP(operator+,sum)
#else
/** \returns an expression of \c *this with each coeff incremented by the constant \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*
* Example: \include Cwise_plus.cpp
* Output: \verbinclude Cwise_plus.out
*
* \sa operator+=(), operator-()
*/
template<typename T>
const CwiseBinaryOp<internal::scalar_sum_op<Scalar,T>,Derived,Constant<T> > operator+(const T& scalar) const;
/** \returns an expression of \a expr with each coeff incremented by the constant \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*/
template<typename T> friend
const CwiseBinaryOp<internal::scalar_sum_op<T,Scalar>,Constant<T>,Derived> operator+(const T& scalar, const StorageBaseType& expr);
#endif
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP(operator-,difference)
#else
/** \returns an expression of \c *this with each coeff decremented by the constant \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*
* Example: \include Cwise_minus.cpp
* Output: \verbinclude Cwise_minus.out
*
* \sa operator+=(), operator-()
*/
template<typename T>
const CwiseBinaryOp<internal::scalar_difference_op<Scalar,T>,Derived,Constant<T> > operator-(const T& scalar) const;
/** \returns an expression of the constant matrix of value \a scalar decremented by the coefficients of \a expr
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*/
template<typename T> friend
const CwiseBinaryOp<internal::scalar_difference_op<T,Scalar>,Constant<T>,Derived> operator-(const T& scalar, const StorageBaseType& expr);
#endif
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(operator/,quotient)
#else
/**
* \brief Component-wise division of the scalar \a s by array elements of \a a.
*
  * \tparam T is the scalar type of \a s. It must be compatible with the scalar type of the given array expression (\c Derived::Scalar).
*/
template<typename T> friend
inline const CwiseBinaryOp<internal::scalar_quotient_op<T,Scalar>,Constant<T>,Derived>
operator/(const T& s,const StorageBaseType& a);
#endif
/** \returns an expression of the coefficient-wise ^ operator of *this and \a other
*
  * \warning this operator is for expressions of bool only.
*
* Example: \include Cwise_boolean_xor.cpp
* Output: \verbinclude Cwise_boolean_xor.out
*
* \sa operator&&(), select()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline const CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>
operator^(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>(derived(),other.derived());
}
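// Usage sketch (illustrative): operator^ is restricted to boolean
// expressions, typically the results of coefficient-wise comparisons:
//
//   Eigen::ArrayXf a(3), b(3);
//   a << 1, 2, 3;  b << 3, 2, 1;
//   auto mask = (a > 1.f) ^ (b > 1.f);   // {true, false, true}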
// NOTE disabled until we agree on argument order
#if 0
/** \cpp11 \returns an expression of the coefficient-wise polygamma function.
*
* \specialfunctions_module
*
  * It returns the \a n -th derivative of the digamma function (psi) evaluated at \c *this.
*
* \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x)
*
* \sa Eigen::polygamma()
*/
template<typename DerivedN>
inline const CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>
polygamma(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedN> &n) const
{
return CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>(n.derived(), this->derived());
}
#endif
/** \returns an expression of the coefficient-wise zeta function.
*
* \specialfunctions_module
*
* It returns the Riemann zeta function of two arguments \c *this and \a q:
*
  * \param *this is the exponent; it must be > 1
  * \param q is the shift; it must be > 0
*
* \note This function supports only float and double scalar types. To support other scalar types, the user has
* to provide implementations of zeta(T,T) for any scalar type T to be supported.
*
* This method is an alias for zeta(*this,q);
*
* \sa Eigen::zeta()
*/
template<typename DerivedQ>
inline const CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>
zeta(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedQ> &q) const
{
return CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>(this->derived(), q.derived());
}
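// Usage sketch (illustrative): the scalar overloads documented above all
// act coefficient-wise and can be chained freely, e.g.
//
//   Eigen::ArrayXf v(3);  v << 1, 2, 3;
//   Eigen::ArrayXf w = (v + 1.f).pow(2.f) / 2.f;   // {2, 4.5, 8}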
| 13,132 | 38.438438 | 216 |
h
|
abess
|
abess-master/python/include/Eigen/src/plugins/ArrayCwiseUnaryOps.h
|
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> AbsReturnType;
typedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> ArgReturnType;
typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> Abs2ReturnType;
typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> SqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived> RsqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> SignReturnType;
typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> InverseReturnType;
typedef CwiseUnaryOp<internal::scalar_boolean_not_op<Scalar>, const Derived> BooleanNotReturnType;
typedef CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived> ExpReturnType;
typedef CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived> LogReturnType;
typedef CwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived> Log1pReturnType;
typedef CwiseUnaryOp<internal::scalar_log10_op<Scalar>, const Derived> Log10ReturnType;
typedef CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived> CosReturnType;
typedef CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived> SinReturnType;
typedef CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived> TanReturnType;
typedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturnType;
typedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType;
typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType;
typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;
typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;
typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;
typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;
typedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType;
typedef CwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived> RoundReturnType;
typedef CwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived> FloorReturnType;
typedef CwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived> CeilReturnType;
typedef CwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived> IsNaNReturnType;
typedef CwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived> IsInfReturnType;
typedef CwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived> IsFiniteReturnType;
/** \returns an expression of the coefficient-wise absolute value of \c *this
*
* Example: \include Cwise_abs.cpp
* Output: \verbinclude Cwise_abs.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_abs">Math functions</a>, abs2()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const AbsReturnType
abs() const
{
return AbsReturnType(derived());
}
/** \returns an expression of the coefficient-wise phase angle of \c *this
*
* Example: \include Cwise_arg.cpp
* Output: \verbinclude Cwise_arg.out
*
* \sa abs()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const ArgReturnType
arg() const
{
return ArgReturnType(derived());
}
/** \returns an expression of the coefficient-wise squared absolute value of \c *this
*
* Example: \include Cwise_abs2.cpp
* Output: \verbinclude Cwise_abs2.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_abs2">Math functions</a>, abs(), square()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Abs2ReturnType
abs2() const
{
return Abs2ReturnType(derived());
}
/** \returns an expression of the coefficient-wise exponential of *this.
*
* This function computes the coefficient-wise exponential. The function MatrixBase::exp() in the
* unsupported module MatrixFunctions computes the matrix exponential.
*
* Example: \include Cwise_exp.cpp
* Output: \verbinclude Cwise_exp.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_exp">Math functions</a>, pow(), log(), sin(), cos()
*/
EIGEN_DEVICE_FUNC
inline const ExpReturnType
exp() const
{
return ExpReturnType(derived());
}
/** \returns an expression of the coefficient-wise logarithm of *this.
*
* This function computes the coefficient-wise logarithm. The function MatrixBase::log() in the
* unsupported module MatrixFunctions computes the matrix logarithm.
*
* Example: \include Cwise_log.cpp
* Output: \verbinclude Cwise_log.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log">Math functions</a>, exp()
*/
EIGEN_DEVICE_FUNC
inline const LogReturnType
log() const
{
return LogReturnType(derived());
}
/** \returns an expression of the coefficient-wise logarithm of 1 plus \c *this.
*
  * In exact arithmetic, \c x.log1p() is equivalent to \c (x+1).log();
  * however, with finite precision, this function is much more accurate when \c x is close to zero.
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log1p">Math functions</a>, log()
*/
EIGEN_DEVICE_FUNC
inline const Log1pReturnType
log1p() const
{
return Log1pReturnType(derived());
}
/** \returns an expression of the coefficient-wise base-10 logarithm of *this.
*
* This function computes the coefficient-wise base-10 logarithm.
*
* Example: \include Cwise_log10.cpp
* Output: \verbinclude Cwise_log10.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log10">Math functions</a>, log()
*/
EIGEN_DEVICE_FUNC
inline const Log10ReturnType
log10() const
{
return Log10ReturnType(derived());
}
/** \returns an expression of the coefficient-wise square root of *this.
*
* This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the
* unsupported module MatrixFunctions computes the matrix square root.
*
* Example: \include Cwise_sqrt.cpp
* Output: \verbinclude Cwise_sqrt.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sqrt">Math functions</a>, pow(), square()
*/
EIGEN_DEVICE_FUNC
inline const SqrtReturnType
sqrt() const
{
return SqrtReturnType(derived());
}
/** \returns an expression of the coefficient-wise inverse square root of *this.
*
* This function computes the coefficient-wise inverse square root.
*
* Example: \include Cwise_sqrt.cpp
* Output: \verbinclude Cwise_sqrt.out
*
* \sa pow(), square()
*/
EIGEN_DEVICE_FUNC
inline const RsqrtReturnType
rsqrt() const
{
return RsqrtReturnType(derived());
}
/** \returns an expression of the coefficient-wise signum of *this.
*
* This function computes the coefficient-wise signum.
*
* Example: \include Cwise_sign.cpp
* Output: \verbinclude Cwise_sign.out
*
* \sa pow(), square()
*/
EIGEN_DEVICE_FUNC
inline const SignReturnType
sign() const
{
return SignReturnType(derived());
}
/** \returns an expression of the coefficient-wise cosine of *this.
*
* This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the
* unsupported module MatrixFunctions computes the matrix cosine.
*
* Example: \include Cwise_cos.cpp
* Output: \verbinclude Cwise_cos.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cos">Math functions</a>, sin(), acos()
*/
EIGEN_DEVICE_FUNC
inline const CosReturnType
cos() const
{
return CosReturnType(derived());
}
/** \returns an expression of the coefficient-wise sine of *this.
*
* This function computes the coefficient-wise sine. The function MatrixBase::sin() in the
* unsupported module MatrixFunctions computes the matrix sine.
*
* Example: \include Cwise_sin.cpp
* Output: \verbinclude Cwise_sin.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sin">Math functions</a>, cos(), asin()
*/
EIGEN_DEVICE_FUNC
inline const SinReturnType
sin() const
{
return SinReturnType(derived());
}
/** \returns an expression of the coefficient-wise tan of *this.
*
* Example: \include Cwise_tan.cpp
* Output: \verbinclude Cwise_tan.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_tan">Math functions</a>, cos(), sin()
*/
EIGEN_DEVICE_FUNC
inline const TanReturnType
tan() const
{
return TanReturnType(derived());
}
/** \returns an expression of the coefficient-wise arc tan of *this.
*
* Example: \include Cwise_atan.cpp
* Output: \verbinclude Cwise_atan.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_atan">Math functions</a>, tan(), asin(), acos()
*/
EIGEN_DEVICE_FUNC
inline const AtanReturnType
atan() const
{
return AtanReturnType(derived());
}
/** \returns an expression of the coefficient-wise arc cosine of *this.
*
* Example: \include Cwise_acos.cpp
* Output: \verbinclude Cwise_acos.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_acos">Math functions</a>, cos(), asin()
*/
EIGEN_DEVICE_FUNC
inline const AcosReturnType
acos() const
{
return AcosReturnType(derived());
}
/** \returns an expression of the coefficient-wise arc sine of *this.
*
* Example: \include Cwise_asin.cpp
* Output: \verbinclude Cwise_asin.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_asin">Math functions</a>, sin(), acos()
*/
EIGEN_DEVICE_FUNC
inline const AsinReturnType
asin() const
{
return AsinReturnType(derived());
}
/** \returns an expression of the coefficient-wise hyperbolic tan of *this.
*
* Example: \include Cwise_tanh.cpp
* Output: \verbinclude Cwise_tanh.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_tanh">Math functions</a>, tan(), sinh(), cosh()
*/
EIGEN_DEVICE_FUNC
inline const TanhReturnType
tanh() const
{
return TanhReturnType(derived());
}
/** \returns an expression of the coefficient-wise hyperbolic sin of *this.
*
* Example: \include Cwise_sinh.cpp
* Output: \verbinclude Cwise_sinh.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sinh">Math functions</a>, sin(), tanh(), cosh()
*/
EIGEN_DEVICE_FUNC
inline const SinhReturnType
sinh() const
{
return SinhReturnType(derived());
}
/** \returns an expression of the coefficient-wise hyperbolic cos of *this.
*
* Example: \include Cwise_cosh.cpp
* Output: \verbinclude Cwise_cosh.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tan(), sinh(), cosh()
*/
EIGEN_DEVICE_FUNC
inline const CoshReturnType
cosh() const
{
return CoshReturnType(derived());
}
/** \returns an expression of the coefficient-wise inverse of *this.
*
* Example: \include Cwise_inverse.cpp
* Output: \verbinclude Cwise_inverse.out
*
* \sa operator/(), operator*()
*/
EIGEN_DEVICE_FUNC
inline const InverseReturnType
inverse() const
{
return InverseReturnType(derived());
}
/** \returns an expression of the coefficient-wise square of *this.
*
* Example: \include Cwise_square.cpp
* Output: \verbinclude Cwise_square.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_squareE">Math functions</a>, abs2(), cube(), pow()
*/
EIGEN_DEVICE_FUNC
inline const SquareReturnType
square() const
{
return SquareReturnType(derived());
}
/** \returns an expression of the coefficient-wise cube of *this.
*
* Example: \include Cwise_cube.cpp
* Output: \verbinclude Cwise_cube.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cube">Math functions</a>, square(), pow()
*/
EIGEN_DEVICE_FUNC
inline const CubeReturnType
cube() const
{
return CubeReturnType(derived());
}
/** \returns an expression of the coefficient-wise round of *this.
*
* Example: \include Cwise_round.cpp
* Output: \verbinclude Cwise_round.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_round">Math functions</a>, ceil(), floor()
*/
EIGEN_DEVICE_FUNC
inline const RoundReturnType
round() const
{
return RoundReturnType(derived());
}
/** \returns an expression of the coefficient-wise floor of *this.
*
* Example: \include Cwise_floor.cpp
* Output: \verbinclude Cwise_floor.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_floor">Math functions</a>, ceil(), round()
*/
EIGEN_DEVICE_FUNC
inline const FloorReturnType
floor() const
{
return FloorReturnType(derived());
}
/** \returns an expression of the coefficient-wise ceil of *this.
*
* Example: \include Cwise_ceil.cpp
* Output: \verbinclude Cwise_ceil.out
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_ceil">Math functions</a>, floor(), round()
*/
EIGEN_DEVICE_FUNC
inline const CeilReturnType
ceil() const
{
return CeilReturnType(derived());
}
/** \returns an expression of the coefficient-wise isnan of *this.
*
* Example: \include Cwise_isNaN.cpp
* Output: \verbinclude Cwise_isNaN.out
*
* \sa isfinite(), isinf()
*/
EIGEN_DEVICE_FUNC
inline const IsNaNReturnType
isNaN() const
{
return IsNaNReturnType(derived());
}
/** \returns an expression of the coefficient-wise isinf of *this.
*
* Example: \include Cwise_isInf.cpp
* Output: \verbinclude Cwise_isInf.out
*
* \sa isnan(), isfinite()
*/
EIGEN_DEVICE_FUNC
inline const IsInfReturnType
isInf() const
{
return IsInfReturnType(derived());
}
/** \returns an expression of the coefficient-wise isfinite of *this.
*
* Example: \include Cwise_isFinite.cpp
* Output: \verbinclude Cwise_isFinite.out
*
* \sa isnan(), isinf()
*/
EIGEN_DEVICE_FUNC
inline const IsFiniteReturnType
isFinite() const
{
return IsFiniteReturnType(derived());
}
/** \returns an expression of the coefficient-wise ! operator of *this
*
  * \warning this operator is for expressions of bool only.
*
* Example: \include Cwise_boolean_not.cpp
* Output: \verbinclude Cwise_boolean_not.out
*
* \sa operator!=()
*/
EIGEN_DEVICE_FUNC
inline const BooleanNotReturnType
operator!() const
{
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value),
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return BooleanNotReturnType(derived());
}
// --- SpecialFunctions module ---
typedef CwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived> LgammaReturnType;
typedef CwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived> DigammaReturnType;
typedef CwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived> ErfReturnType;
typedef CwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived> ErfcReturnType;
/** \cpp11 \returns an expression of the coefficient-wise ln(|gamma(*this)|).
*
* \specialfunctions_module
*
* Example: \include Cwise_lgamma.cpp
* Output: \verbinclude Cwise_lgamma.out
*
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of lgamma(T) for any scalar
* type T to be supported.
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_lgamma">Math functions</a>, digamma()
*/
EIGEN_DEVICE_FUNC
inline const LgammaReturnType
lgamma() const
{
return LgammaReturnType(derived());
}
/** \returns an expression of the coefficient-wise digamma (psi, derivative of lgamma).
*
* \specialfunctions_module
*
* \note This function supports only float and double scalar types. To support other scalar types,
* the user has to provide implementations of digamma(T) for any scalar
* type T to be supported.
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_digamma">Math functions</a>, Eigen::digamma(), Eigen::polygamma(), lgamma()
*/
EIGEN_DEVICE_FUNC
inline const DigammaReturnType
digamma() const
{
return DigammaReturnType(derived());
}
/** \cpp11 \returns an expression of the coefficient-wise Gauss error
* function of *this.
*
* \specialfunctions_module
*
* Example: \include Cwise_erf.cpp
* Output: \verbinclude Cwise_erf.out
*
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of erf(T) for any scalar
* type T to be supported.
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_erf">Math functions</a>, erfc()
*/
EIGEN_DEVICE_FUNC
inline const ErfReturnType
erf() const
{
return ErfReturnType(derived());
}
/** \cpp11 \returns an expression of the coefficient-wise Complementary error
* function of *this.
*
* \specialfunctions_module
*
* Example: \include Cwise_erfc.cpp
* Output: \verbinclude Cwise_erfc.out
*
* \note This function supports only float and double scalar types in c++11 mode. To support other scalar types,
* or float/double in non c++11 mode, the user has to provide implementations of erfc(T) for any scalar
* type T to be supported.
*
* \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_erfc">Math functions</a>, erf()
*/
EIGEN_DEVICE_FUNC
inline const ErfcReturnType
erfc() const
{
return ErfcReturnType(derived());
}
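// Usage sketch (illustrative): the unary views above are lazy expressions
// that can be chained without creating temporaries, e.g.
//
//   Eigen::ArrayXd x(3);  x << -4.0, 0.25, 9.0;
//   Eigen::ArrayXd y = x.abs().sqrt().log();   // log(sqrt(|x_i|))
//   bool ok = y.isFinite().all();              // true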
| 16,929 | 29.614828 | 138 |
h
|
abess
|
abess-master/python/include/Eigen/src/plugins/BlockMethods.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2010 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARSED_BY_DOXYGEN
/// \internal expression type of a column
typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr;
typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr;
/// \internal expression type of a row
typedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr;
typedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr;
/// \internal expression type of a block of whole columns
typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr;
typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr;
/// \internal expression type of a block of whole rows
typedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr;
typedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr;
/// \internal expression type of a block of whole columns
template<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
/// \internal expression type of a block of whole rows
template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
/// \internal expression of a block
typedef Block<Derived> BlockXpr;
typedef const Block<const Derived> ConstBlockXpr;
/// \internal expression of a block of fixed sizes
template<int Rows, int Cols> struct FixedBlockXpr { typedef Block<Derived,Rows,Cols> Type; };
template<int Rows, int Cols> struct ConstFixedBlockXpr { typedef Block<const Derived,Rows,Cols> Type; };
typedef VectorBlock<Derived> SegmentReturnType;
typedef const VectorBlock<const Derived> ConstSegmentReturnType;
template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
#endif // not EIGEN_PARSED_BY_DOXYGEN
/// \returns a dynamic-size expression of a block in *this.
///
/// \param startRow the first row in the block
/// \param startCol the first column in the block
/// \param blockRows the number of rows in the block
/// \param blockCols the number of columns in the block
///
/// Example: \include MatrixBase_block_int_int_int_int.cpp
/// Output: \verbinclude MatrixBase_block_int_int_int_int.out
///
/// \note Even though the returned expression has dynamic size, in the case
/// when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
/// which means that evaluating it does not cause a dynamic memory allocation.
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index)
///
EIGEN_DEVICE_FUNC
inline BlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols)
{
return BlockXpr(derived(), startRow, startCol, blockRows, blockCols);
}
/// This is the const version of block(Index,Index,Index,Index).
EIGEN_DEVICE_FUNC
inline const ConstBlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
{
return ConstBlockXpr(derived(), startRow, startCol, blockRows, blockCols);
}
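// Usage sketch (illustrative): block() returns a view into the original
// matrix, so it can appear on either side of an assignment:
//
//   Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
//   m.block(1, 1, 2, 2) = Eigen::Matrix2d::Identity();   // write through
//   Eigen::MatrixXd c  = m.block(0, 0, 2, 2);            // copy out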
/// \returns a dynamic-size expression of a top-right corner of *this.
///
/// \param cRows the number of rows in the corner
/// \param cCols the number of columns in the corner
///
/// Example: \include MatrixBase_topRightCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_topRightCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline BlockXpr topRightCorner(Index cRows, Index cCols)
{
return BlockXpr(derived(), 0, cols() - cCols, cRows, cCols);
}
/// This is the const version of topRightCorner(Index, Index).
EIGEN_DEVICE_FUNC
inline const ConstBlockXpr topRightCorner(Index cRows, Index cCols) const
{
return ConstBlockXpr(derived(), 0, cols() - cCols, cRows, cCols);
}
/// \returns an expression of a fixed-size top-right corner of *this.
///
/// \tparam CRows the number of rows in the corner
/// \tparam CCols the number of columns in the corner
///
/// Example: \include MatrixBase_template_int_int_topRightCorner.cpp
/// Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block<int,int>(Index,Index)
///
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
/// This is the const version of topRightCorner<int, int>().
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);
}
/// \returns an expression of a top-right corner of *this.
///
/// \tparam CRows number of rows in corner as specified at compile-time
/// \tparam CCols number of columns in corner as specified at compile-time
/// \param cRows number of rows in corner as specified at run-time
/// \param cCols number of columns in corner as specified at run-time
///
/// This function is mainly useful for corners where the number of rows is specified at compile-time
/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
/// information should not contradict. In other words, \a cRows should equal \a CRows unless
/// \a CRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block
///
template<int CRows, int CCols>
inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
/// This is the const version of topRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);
}
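// Usage sketch (illustrative): the fixed-size corner variants resolve
// their extents at compile time, avoiding dynamic-size bookkeeping:
//
//   Eigen::Matrix4f m = Eigen::Matrix4f::Random();
//   Eigen::Matrix2f c1 = m.topRightCorner<2, 2>();   // compile-time 2x2
//   Eigen::MatrixXf c2 = m.topRightCorner(2, 2);     // run-time 2x2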
/// \returns a dynamic-size expression of a top-left corner of *this.
///
/// \param cRows the number of rows in the corner
/// \param cCols the number of columns in the corner
///
/// Example: \include MatrixBase_topLeftCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_topLeftCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline BlockXpr topLeftCorner(Index cRows, Index cCols)
{
return BlockXpr(derived(), 0, 0, cRows, cCols);
}
/// This is the const version of topLeftCorner(Index, Index).
EIGEN_DEVICE_FUNC
inline const ConstBlockXpr topLeftCorner(Index cRows, Index cCols) const
{
return ConstBlockXpr(derived(), 0, 0, cRows, cCols);
}
/// \returns an expression of a fixed-size top-left corner of *this.
///
/// The template parameters CRows and CCols are the number of rows and columns in the corner.
///
/// Example: \include MatrixBase_template_int_int_topLeftCorner.cpp
/// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
/// This is the const version of topLeftCorner<int, int>().
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);
}
/// \returns an expression of a top-left corner of *this.
///
/// \tparam CRows number of rows in corner as specified at compile-time
/// \tparam CCols number of columns in corner as specified at compile-time
/// \param cRows number of rows in corner as specified at run-time
/// \param cCols number of columns in corner as specified at run-time
///
/// This function is mainly useful for corners where the number of rows is specified at compile-time
/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
/// information should not contradict. In other words, \a cRows should equal \a CRows unless
/// \a CRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block
///
template<int CRows, int CCols>
inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
/// This is the const version of topLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);
}
/// \returns a dynamic-size expression of a bottom-right corner of *this.
///
/// \param cRows the number of rows in the corner
/// \param cCols the number of columns in the corner
///
/// Example: \include MatrixBase_bottomRightCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline BlockXpr bottomRightCorner(Index cRows, Index cCols)
{
return BlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// This is the const version of bottomRightCorner(Index, Index).
EIGEN_DEVICE_FUNC
inline const ConstBlockXpr bottomRightCorner(Index cRows, Index cCols) const
{
return ConstBlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// \returns an expression of a fixed-size bottom-right corner of *this.
///
/// The template parameters CRows and CCols are the number of rows and columns in the corner.
///
/// Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp
/// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
/// This is the const version of bottomRightCorner<int, int>().
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);
}
/// \returns an expression of a bottom-right corner of *this.
///
/// \tparam CRows number of rows in corner as specified at compile-time
/// \tparam CCols number of columns in corner as specified at compile-time
/// \param cRows number of rows in corner as specified at run-time
/// \param cCols number of columns in corner as specified at run-time
///
/// This function is mainly useful for corners where the number of rows is specified at compile-time
/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
/// information should not contradict. In other words, \a cRows should equal \a CRows unless
/// \a CRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block
///
template<int CRows, int CCols>
inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// This is the const version of bottomRightCorner<int, int>(Index, Index).
template<int CRows, int CCols>
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
}
/// \returns a dynamic-size expression of a bottom-left corner of *this.
///
/// \param cRows the number of rows in the corner
/// \param cCols the number of columns in the corner
///
/// Example: \include MatrixBase_bottomLeftCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline BlockXpr bottomLeftCorner(Index cRows, Index cCols)
{
return BlockXpr(derived(), rows() - cRows, 0, cRows, cCols);
}
/// This is the const version of bottomLeftCorner(Index, Index).
EIGEN_DEVICE_FUNC
inline const ConstBlockXpr bottomLeftCorner(Index cRows, Index cCols) const
{
return ConstBlockXpr(derived(), rows() - cRows, 0, cRows, cCols);
}
/// \returns an expression of a fixed-size bottom-left corner of *this.
///
/// The template parameters CRows and CCols are the number of rows and columns in the corner.
///
/// Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp
/// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
/// This is the const version of bottomLeftCorner<int, int>().
template<int CRows, int CCols>
EIGEN_DEVICE_FUNC
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);
}
/// \returns an expression of a bottom-left corner of *this.
///
/// \tparam CRows number of rows in corner as specified at compile-time
/// \tparam CCols number of columns in corner as specified at compile-time
/// \param cRows number of rows in corner as specified at run-time
/// \param cCols number of columns in corner as specified at run-time
///
/// This function is mainly useful for corners where the number of rows is specified at compile-time
/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
/// information should not contradict. In other words, \a cRows should equal \a CRows unless
/// \a CRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp
/// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block
///
template<int CRows, int CCols>
inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)
{
return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
/// This is the const version of bottomLeftCorner<int, int>(Index, Index).
template<int CRows, int CCols>
inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const
{
return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);
}
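// The bottom-left variants follow the same pattern (sketch; `m` as above):
//
//   m.bottomLeftCorner<2, 2>();                    // fixed 2x2 block starting at (rows()-2, 0)
//   m.bottomLeftCorner<Eigen::Dynamic, 2>(3, 2);   // rows chosen at run-time, cols fixed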
/// \returns a block consisting of the top rows of *this.
///
/// \param n the number of rows in the block
///
/// Example: \include MatrixBase_topRows_int.cpp
/// Output: \verbinclude MatrixBase_topRows_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline RowsBlockXpr topRows(Index n)
{
return RowsBlockXpr(derived(), 0, 0, n, cols());
}
/// This is the const version of topRows(Index).
EIGEN_DEVICE_FUNC
inline ConstRowsBlockXpr topRows(Index n) const
{
return ConstRowsBlockXpr(derived(), 0, 0, n, cols());
}
/// \returns a block consisting of the top rows of *this.
///
/// \tparam N the number of rows in the block as specified at compile-time
/// \param n the number of rows in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_topRows.cpp
/// Output: \verbinclude MatrixBase_template_int_topRows.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/// This is the const version of topRows<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/// \returns a block consisting of the bottom rows of *this.
///
/// \param n the number of rows in the block
///
/// Example: \include MatrixBase_bottomRows_int.cpp
/// Output: \verbinclude MatrixBase_bottomRows_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline RowsBlockXpr bottomRows(Index n)
{
return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
}
/// This is the const version of bottomRows(Index).
EIGEN_DEVICE_FUNC
inline ConstRowsBlockXpr bottomRows(Index n) const
{
return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols());
}
/// \returns a block consisting of the bottom rows of *this.
///
/// \tparam N the number of rows in the block as specified at compile-time
/// \param n the number of rows in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_bottomRows.cpp
/// Output: \verbinclude MatrixBase_template_int_bottomRows.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/// This is the const version of bottomRows<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/// \returns a block consisting of a range of rows of *this.
///
/// \param startRow the index of the first row in the block
/// \param n the number of rows in the block
///
/// Example: \include DenseBase_middleRows_int.cpp
/// Output: \verbinclude DenseBase_middleRows_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline RowsBlockXpr middleRows(Index startRow, Index n)
{
return RowsBlockXpr(derived(), startRow, 0, n, cols());
}
/// This is the const version of middleRows(Index,Index).
EIGEN_DEVICE_FUNC
inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const
{
return ConstRowsBlockXpr(derived(), startRow, 0, n, cols());
}
/// \returns a block consisting of a range of rows of *this.
///
/// \tparam N the number of rows in the block as specified at compile-time
/// \param startRow the index of the first row in the block
/// \param n the number of rows in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include DenseBase_template_int_middleRows.cpp
/// Output: \verbinclude DenseBase_template_int_middleRows.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
/// This is the const version of middleRows<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
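// A short sketch of the row-panel accessors (illustrative; assumes a 5x4 Eigen::MatrixXd `m`):
//
//   m.topRows(2);        // rows 0..1, size known at run-time
//   m.bottomRows<2>();   // last two rows, size known at compile-time
//   m.middleRows(1, 3);  // rows 1..3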
/// \returns a block consisting of the left columns of *this.
///
/// \param n the number of columns in the block
///
/// Example: \include MatrixBase_leftCols_int.cpp
/// Output: \verbinclude MatrixBase_leftCols_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline ColsBlockXpr leftCols(Index n)
{
return ColsBlockXpr(derived(), 0, 0, rows(), n);
}
/// This is the const version of leftCols(Index).
EIGEN_DEVICE_FUNC
inline ConstColsBlockXpr leftCols(Index n) const
{
return ConstColsBlockXpr(derived(), 0, 0, rows(), n);
}
/// \returns a block consisting of the left columns of *this.
///
/// \tparam N the number of columns in the block as specified at compile-time
/// \param n the number of columns in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_leftCols.cpp
/// Output: \verbinclude MatrixBase_template_int_leftCols.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/// This is the const version of leftCols<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/// \returns a block consisting of the right columns of *this.
///
/// \param n the number of columns in the block
///
/// Example: \include MatrixBase_rightCols_int.cpp
/// Output: \verbinclude MatrixBase_rightCols_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline ColsBlockXpr rightCols(Index n)
{
return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
}
/// This is the const version of rightCols(Index).
EIGEN_DEVICE_FUNC
inline ConstColsBlockXpr rightCols(Index n) const
{
return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n);
}
/// \returns a block consisting of the right columns of *this.
///
/// \tparam N the number of columns in the block as specified at compile-time
/// \param n the number of columns in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_rightCols.cpp
/// Output: \verbinclude MatrixBase_template_int_rightCols.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/// This is the const version of rightCols<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/// \returns a block consisting of a range of columns of *this.
///
/// \param startCol the index of the first column in the block
/// \param numCols the number of columns in the block
///
/// Example: \include DenseBase_middleCols_int.cpp
/// Output: \verbinclude DenseBase_middleCols_int.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
EIGEN_DEVICE_FUNC
inline ColsBlockXpr middleCols(Index startCol, Index numCols)
{
return ColsBlockXpr(derived(), 0, startCol, rows(), numCols);
}
/// This is the const version of middleCols(Index,Index).
EIGEN_DEVICE_FUNC
inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
{
return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols);
}
/// \returns a block consisting of a range of columns of *this.
///
/// \tparam N the number of columns in the block as specified at compile-time
/// \param startCol the index of the first column in the block
/// \param n the number of columns in the block as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include DenseBase_template_int_middleCols.cpp
/// Output: \verbinclude DenseBase_template_int_middleCols.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
/// This is the const version of middleCols<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
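// The column-panel accessors mirror the row versions (sketch; `m` as above):
//
//   m.leftCols(2);       // columns 0..1
//   m.rightCols<1>();    // last column, size fixed at compile-time
//   m.middleCols(1, 2);  // columns 1..2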
/// \returns a fixed-size expression of a block in *this.
///
/// The template parameters \a NRows and \a NCols are the number of
/// rows and columns in the block.
///
/// \param startRow the first row in the block
/// \param startCol the first column in the block
///
/// Example: \include MatrixBase_block_int_int.cpp
/// Output: \verbinclude MatrixBase_block_int_int.out
///
/// \note since block is a templated member, the keyword template has to be used
/// if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int NRows, int NCols>
EIGEN_DEVICE_FUNC
inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
/// This is the const version of block<>(Index, Index).
template<int NRows, int NCols>
EIGEN_DEVICE_FUNC
inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);
}
/// \returns an expression of a block in *this.
///
/// \tparam NRows number of rows in block as specified at compile-time
/// \tparam NCols number of columns in block as specified at compile-time
/// \param startRow the first row in the block
/// \param startCol the first column in the block
/// \param blockRows number of rows in block as specified at run-time
/// \param blockCols number of columns in block as specified at run-time
///
/// This function is mainly useful for blocks where the number of rows is specified at compile-time
/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
/// information should not contradict. In other words, \a blockRows should equal \a NRows unless
/// \a NRows is \a Dynamic, and the same for the number of columns.
///
/// Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp
/// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.out
///
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
///
/// \sa class Block, block(Index,Index,Index,Index)
///
template<int NRows, int NCols>
inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols)
{
return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
}
/// This is the const version of block<>(Index, Index, Index, Index).
template<int NRows, int NCols>
inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,
Index blockRows, Index blockCols) const
{
return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);
}
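// A sketch of the fixed-size and hybrid block forms (illustrative; `m` is a 4x4 MatrixXd):
//
//   m.block(1, 1, 2, 2);                            // fully dynamic 2x2 block at (1,1)
//   m.block<2, 2>(1, 1);                            // fixed 2x2 block at (1,1)
//   m.block<2, Eigen::Dynamic>(1, 0, 2, m.cols());  // rows fixed, cols given at run-time
//
// Inside templated code the `template` keyword is required, as noted above:
//   m.template block<2, 2>(1, 1);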
/// \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
///
/// Example: \include MatrixBase_col.cpp
/// Output: \verbinclude MatrixBase_col.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)
/**
* \sa row(), class Block */
EIGEN_DEVICE_FUNC
inline ColXpr col(Index i)
{
return ColXpr(derived(), i);
}
/// This is the const version of col().
EIGEN_DEVICE_FUNC
inline ConstColXpr col(Index i) const
{
return ConstColXpr(derived(), i);
}
/// \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0.
///
/// Example: \include MatrixBase_row.cpp
/// Output: \verbinclude MatrixBase_row.out
///
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)
/**
* \sa col(), class Block */
EIGEN_DEVICE_FUNC
inline RowXpr row(Index i)
{
return RowXpr(derived(), i);
}
/// This is the const version of row().
EIGEN_DEVICE_FUNC
inline ConstRowXpr row(Index i) const
{
return ConstRowXpr(derived(), i);
}
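// row() and col() return writable expressions, so they can appear on either side of an
// assignment (sketch, assuming compatible sizes):
//
//   m.col(0) = m.col(1) + m.col(2);
//   m.row(1) *= 2.0;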
/// \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
///
/// \only_for_vectors
///
/// \param start the first coefficient in the segment
/// \param n the number of coefficients in the segment
///
/// Example: \include MatrixBase_segment_int_int.cpp
/// Output: \verbinclude MatrixBase_segment_int_int.out
///
/// \note Even though the returned expression has dynamic size, in the case
/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,
/// which means that evaluating it does not cause a dynamic memory allocation.
///
/// \sa class Block, segment(Index)
///
EIGEN_DEVICE_FUNC
inline SegmentReturnType segment(Index start, Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), start, n);
}
/// This is the const version of segment(Index,Index).
EIGEN_DEVICE_FUNC
inline ConstSegmentReturnType segment(Index start, Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), start, n);
}
/// \returns a dynamic-size expression of the first coefficients of *this.
///
/// \only_for_vectors
///
/// \param n the number of coefficients in the segment
///
/// Example: \include MatrixBase_start_int.cpp
/// Output: \verbinclude MatrixBase_start_int.out
///
/// \note Even though the returned expression has dynamic size, in the case
/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,
/// which means that evaluating it does not cause a dynamic memory allocation.
///
/// \sa class Block, block(Index,Index)
///
EIGEN_DEVICE_FUNC
inline SegmentReturnType head(Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), 0, n);
}
/// This is the const version of head(Index).
EIGEN_DEVICE_FUNC
inline ConstSegmentReturnType head(Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), 0, n);
}
/// \returns a dynamic-size expression of the last coefficients of *this.
///
/// \only_for_vectors
///
/// \param n the number of coefficients in the segment
///
/// Example: \include MatrixBase_end_int.cpp
/// Output: \verbinclude MatrixBase_end_int.out
///
/// \note Even though the returned expression has dynamic size, in the case
/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,
/// which means that evaluating it does not cause a dynamic memory allocation.
///
/// \sa class Block, block(Index,Index)
///
EIGEN_DEVICE_FUNC
inline SegmentReturnType tail(Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), this->size() - n, n);
}
/// This is the const version of tail(Index).
EIGEN_DEVICE_FUNC
inline ConstSegmentReturnType tail(Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), this->size() - n, n);
}
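// A sketch of the dynamic-size vector segments (illustrative; assumes an Eigen::VectorXd of size 6):
//
//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0.0, 5.0);
//   v.segment(1, 3);  // coefficients 1..3
//   v.head(2);        // first two coefficients
//   v.tail(2);        // last two coefficients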
/// \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
///
/// \only_for_vectors
///
/// \tparam N the number of coefficients in the segment as specified at compile-time
/// \param start the index of the first element in the segment
/// \param n the number of coefficients in the segment as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_segment.cpp
/// Output: \verbinclude MatrixBase_template_int_segment.out
///
/// \sa class Block
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
}
/// This is the const version of segment<int>(Index).
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
}
/// \returns a fixed-size expression of the first coefficients of *this.
///
/// \only_for_vectors
///
/// \tparam N the number of coefficients in the segment as specified at compile-time
/// \param n the number of coefficients in the segment as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_start.cpp
/// Output: \verbinclude MatrixBase_template_int_start.out
///
/// \sa class Block
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
}
/// This is the const version of head<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
}
/// \returns a fixed-size expression of the last coefficients of *this.
///
/// \only_for_vectors
///
/// \tparam N the number of coefficients in the segment as specified at compile-time
/// \param n the number of coefficients in the segment as specified at run-time
///
/// The compile-time and run-time information should not contradict. In other words,
/// \a n should equal \a N unless \a N is \a Dynamic.
///
/// Example: \include MatrixBase_template_int_end.cpp
/// Output: \verbinclude MatrixBase_template_int_end.out
///
/// \sa class Block
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<N>::Type(derived(), size() - n, n);
}
/// This is the const version of tail<int>().
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n, n);
}
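// The fixed-size counterparts take the size as a template argument (sketch; `v` as above):
//
//   v.segment<3>(1);  // three coefficients starting at index 1
//   v.head<2>();      // first two, size known at compile-time
//   v.tail<2>();      // last two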
| 37,403 | 34.320113 | 149 | h |
| abess | abess-master/python/include/Eigen/src/plugins/CommonCwiseBinaryOps.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2016 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is a base class plugin containing common coefficient-wise functions.
/** \returns an expression of the difference of \c *this and \a other
*
* \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
*
* \sa class CwiseBinaryOp, operator-=()
*/
EIGEN_MAKE_CWISE_BINARY_OP(operator-,difference)
/** \returns an expression of the sum of \c *this and \a other
*
* \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
*
* \sa class CwiseBinaryOp, operator+=()
*/
EIGEN_MAKE_CWISE_BINARY_OP(operator+,sum)
/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
*
* The template parameter \a CustomBinaryOp is the type of the functor
* of the custom operator (see class CwiseBinaryOp for an example)
*
* Here is an example illustrating the use of custom functors:
* \include class_CwiseBinaryOp.cpp
* Output: \verbinclude class_CwiseBinaryOp.out
*
* \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct()
*/
template<typename CustomBinaryOp, typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const
{
return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), func);
}
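// A sketch of binaryExpr() with a custom functor (the functor is hypothetical; needs
// <cmath>, and `a` and `b` are assumed to be same-sized Eigen::MatrixXd objects):
//
//   struct AbsDiffOp {
//     double operator()(double x, double y) const { return std::abs(x - y); }
//   };
//   Eigen::MatrixXd d = a.binaryExpr(b, AbsDiffOp());  // coefficient-wise |a - b|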
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP(operator*,product)
#else
/** \returns an expression of \c *this scaled by the scalar factor \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*/
template<typename T>
const CwiseBinaryOp<internal::scalar_product_op<Scalar,T>,Derived,Constant<T> > operator*(const T& scalar) const;
/** \returns an expression of \a expr scaled by the scalar factor \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*/
template<typename T> friend
const CwiseBinaryOp<internal::scalar_product_op<T,Scalar>,Constant<T>,Derived> operator*(const T& scalar, const StorageBaseType& expr);
#endif
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient)
#else
/** \returns an expression of \c *this divided by the scalar value \a scalar
*
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
*/
template<typename T>
const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,T>,Derived,Constant<T> > operator/(const T& scalar) const;
#endif
/** \returns an expression of the coefficient-wise boolean \b and operator of \c *this and \a other
*
* \warning this operator is for expressions of bool only.
*
* Example: \include Cwise_boolean_and.cpp
* Output: \verbinclude Cwise_boolean_and.out
*
* \sa operator||(), select()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline const CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>(derived(),other.derived());
}
/** \returns an expression of the coefficient-wise boolean \b or operator of \c *this and \a other
*
* \warning this operator is for expressions of bool only.
*
* Example: \include Cwise_boolean_or.cpp
* Output: \verbinclude Cwise_boolean_or.out
*
* \sa operator&&(), select()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
}
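// A sketch of the boolean operators (valid only for expressions of bool):
//
//   Eigen::Array<bool, Eigen::Dynamic, 1> p(3), q(3);
//   p << true, false, true;
//   q << true, true,  false;
//   Eigen::Array<bool, Eigen::Dynamic, 1> r = p && q;  // coefficient-wise AND
//   Eigen::Array<bool, Eigen::Dynamic, 1> s = p || q;  // coefficient-wise OR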
| 4,828 | 40.62931 | 135 | h |
| abess | abess-master/python/include/Eigen/src/plugins/CommonCwiseUnaryOps.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is a base class plugin containing common coefficient-wise functions.
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal the return type of conjugate() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,
const Derived&
>::type ConjugateReturnType;
/** \internal the return type of real() const */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>,
const Derived&
>::type RealReturnType;
/** \internal the return type of real() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>,
Derived&
>::type NonConstRealReturnType;
/** \internal the return type of imag() const */
typedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType;
/** \internal the return type of imag() */
typedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType;
typedef CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> NegativeReturnType;
#endif // not EIGEN_PARSED_BY_DOXYGEN
/// \returns an expression of the opposite of \c *this
///
EIGEN_DOC_UNARY_ADDONS(operator-,opposite)
///
EIGEN_DEVICE_FUNC
inline const NegativeReturnType
operator-() const { return NegativeReturnType(derived()); }
template<class NewType> struct CastXpr { typedef typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<Scalar, NewType>, const Derived> >::type Type; };
/// \returns an expression of \c *this with the \a Scalar type casted to
/// \a NewScalar.
///
/// The template parameter \a NewScalar is the type we are casting the scalars to.
///
EIGEN_DOC_UNARY_ADDONS(cast,conversion function)
///
/// \sa class CwiseUnaryOp
///
template<typename NewType>
EIGEN_DEVICE_FUNC
typename CastXpr<NewType>::Type
cast() const
{
return typename CastXpr<NewType>::Type(derived());
}
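// A sketch of cast<NewType>() (illustrative):
//
//   Eigen::MatrixXi mi = Eigen::MatrixXi::Constant(2, 2, 3);
//   Eigen::MatrixXd md = mi.cast<double>();  // per-coefficient int -> double conversion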
/// \returns an expression of the complex conjugate of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate)
///
/// \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_conj">Math functions</a>, MatrixBase::adjoint()
EIGEN_DEVICE_FUNC
inline ConjugateReturnType
conjugate() const
{
return ConjugateReturnType(derived());
}
/// \returns a read-only expression of the real part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(real,real part function)
///
/// \sa imag()
EIGEN_DEVICE_FUNC
inline RealReturnType
real() const { return RealReturnType(derived()); }
/// \returns a read-only expression of the imaginary part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function)
///
/// \sa real()
EIGEN_DEVICE_FUNC
inline const ImagReturnType
imag() const { return ImagReturnType(derived()); }
/// \brief Apply a unary operator coefficient-wise
/// \param[in] func Functor implementing the unary operator
/// \tparam CustomUnaryOp Type of \a func
/// \returns An expression of a custom coefficient-wise unary operator \a func of *this
///
/// The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions.
///
/// Example:
/// \include class_CwiseUnaryOp_ptrfun.cpp
/// Output: \verbinclude class_CwiseUnaryOp_ptrfun.out
///
/// Genuine functors allow for more possibilities, for instance they may contain state.
///
/// Example:
/// \include class_CwiseUnaryOp.cpp
/// Output: \verbinclude class_CwiseUnaryOp.out
///
EIGEN_DOC_UNARY_ADDONS(unaryExpr,unary function)
///
/// \sa unaryViewExpr, binaryExpr, class CwiseUnaryOp
///
template<typename CustomUnaryOp>
EIGEN_DEVICE_FUNC
inline const CwiseUnaryOp<CustomUnaryOp, const Derived>
unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const
{
return CwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
}
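// A sketch of unaryExpr() with a stateful functor (the functor is hypothetical; `m` is an
// assumed Eigen::MatrixXd):
//
//   struct Clamp {
//     double lo, hi;  // state carried by the functor
//     Clamp(double l, double h) : lo(l), hi(h) {}
//     double operator()(double x) const { return x < lo ? lo : (x > hi ? hi : x); }
//   };
//   Eigen::MatrixXd c = m.unaryExpr(Clamp(-1.0, 1.0));  // coefficient-wise clamping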
/// \returns an expression of a custom coefficient-wise unary operator \a func of *this
///
/// The template parameter \a CustomUnaryOp is the type of the functor
/// of the custom unary operator.
///
/// Example:
/// \include class_CwiseUnaryOp.cpp
/// Output: \verbinclude class_CwiseUnaryOp.out
///
EIGEN_DOC_UNARY_ADDONS(unaryViewExpr,unary function)
///
/// \sa unaryExpr, binaryExpr, class CwiseUnaryOp
///
template<typename CustomViewOp>
EIGEN_DEVICE_FUNC
inline const CwiseUnaryView<CustomViewOp, const Derived>
unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const
{
return CwiseUnaryView<CustomViewOp, const Derived>(derived(), func);
}
/// \returns a non const expression of the real part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(real,real part function)
///
/// \sa imag()
EIGEN_DEVICE_FUNC
inline NonConstRealReturnType
real() { return NonConstRealReturnType(derived()); }
/// \returns a non const expression of the imaginary part of \c *this.
///
EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function)
///
/// \sa real()
EIGEN_DEVICE_FUNC
inline NonConstImagReturnType
imag() { return NonConstImagReturnType(derived()); }
| 5,621 | 33.280488 | 186 | h |
| abess | abess-master/python/include/Eigen/src/plugins/MatrixCwiseBinaryOps.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is a base class plugin containing matrix-specific coefficient-wise functions.
/** \returns an expression of the Schur product (coefficient wise product) of *this and \a other
*
* Example: \include MatrixBase_cwiseProduct.cpp
* Output: \verbinclude MatrixBase_cwiseProduct.out
*
* \sa class CwiseBinaryOp, cwiseAbs2
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)
cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());
}
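// A sketch of cwiseProduct() (illustrative):
//
//   Eigen::Matrix2d a, b;
//   a << 1, 2, 3, 4;
//   b << 10, 20, 30, 40;
//   Eigen::Matrix2d p = a.cwiseProduct(b);  // [10 40; 90 160]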
/** \returns an expression of the coefficient-wise == operator of *this and \a other
*
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
* isMuchSmallerThan().
*
* Example: \include MatrixBase_cwiseEqual.cpp
* Output: \verbinclude MatrixBase_cwiseEqual.out
*
* \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise != operator of *this and \a other
*
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
* isMuchSmallerThan().
*
* Example: \include MatrixBase_cwiseNotEqual.cpp
* Output: \verbinclude MatrixBase_cwiseNotEqual.out
*
* \sa cwiseEqual(), isApprox(), isMuchSmallerThan()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise min of *this and \a other
*
* Example: \include MatrixBase_cwiseMin.cpp
* Output: \verbinclude MatrixBase_cwiseMin.out
*
* \sa class CwiseBinaryOp, max()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>
cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise min of *this and scalar \a other
*
* \sa class CwiseBinaryOp, min()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
cwiseMin(const Scalar &other) const
{
return cwiseMin(Derived::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of *this and \a other
*
* Example: \include MatrixBase_cwiseMax.cpp
* Output: \verbinclude MatrixBase_cwiseMax.out
*
* \sa class CwiseBinaryOp, min()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>
cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
/** \returns an expression of the coefficient-wise max of *this and scalar \a other
*
* \sa class CwiseBinaryOp, min()
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const ConstantReturnType>
cwiseMax(const Scalar &other) const
{
return cwiseMax(Derived::Constant(rows(), cols(), other));
}
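// A sketch of the min/max variants (illustrative; `a` and `b` as above):
//
//   a.cwiseMin(b);    // coefficient-wise minimum of two matrices
//   a.cwiseMax(0.0);  // clamp from below at zero via the scalar overload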
/** \returns an expression of the coefficient-wise quotient of *this and \a other
*
* Example: \include MatrixBase_cwiseQuotient.cpp
* Output: \verbinclude MatrixBase_cwiseQuotient.out
*
* \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse()
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
*
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
* isMuchSmallerThan().
*
* \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
*/
EIGEN_DEVICE_FUNC
inline const CwiseScalarEqualReturnType
cwiseEqual(const Scalar& s) const
{
return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>());
}
| 6,375 | 40.673203 | 147 | h |
| abess | abess-master/python/include/Eigen/src/plugins/MatrixCwiseUnaryOps.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is included into the body of the base classes supporting matrix-specific coefficient-wise functions.
// This includes MatrixBase and SparseMatrixBase.
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;
typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;
typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;
typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;
/// \returns an expression of the coefficient-wise absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value)
///
/// \sa cwiseAbs2()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbsReturnType
cwiseAbs() const { return CwiseAbsReturnType(derived()); }
/// \returns an expression of the coefficient-wise squared absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs2.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs2.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value)
///
/// \sa cwiseAbs()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbs2ReturnType
cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); }
/// \returns an expression of the coefficient-wise square root of *this.
///
/// Example: \include MatrixBase_cwiseSqrt.cpp
/// Output: \verbinclude MatrixBase_cwiseSqrt.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root)
///
/// \sa cwisePow(), cwiseSquare()
///
EIGEN_DEVICE_FUNC
inline const CwiseSqrtReturnType
cwiseSqrt() const { return CwiseSqrtReturnType(derived()); }
/// \returns an expression of the coefficient-wise signum of *this.
///
/// Example: \include MatrixBase_cwiseSign.cpp
/// Output: \verbinclude MatrixBase_cwiseSign.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function)
///
EIGEN_DEVICE_FUNC
inline const CwiseSignReturnType
cwiseSign() const { return CwiseSignReturnType(derived()); }
/// \returns an expression of the coefficient-wise inverse of *this.
///
/// Example: \include MatrixBase_cwiseInverse.cpp
/// Output: \verbinclude MatrixBase_cwiseInverse.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse)
///
/// \sa cwiseProduct()
///
EIGEN_DEVICE_FUNC
inline const CwiseInverseReturnType
cwiseInverse() const { return CwiseInverseReturnType(derived()); }
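// A sketch of the matrix-specific unary functions (illustrative; `m` is an assumed MatrixXd):
//
//   m.cwiseAbs();      // |m_ij|
//   m.cwiseAbs2();     // |m_ij|^2
//   m.cwiseSqrt();     // sqrt(m_ij)
//   m.cwiseInverse();  // 1 / m_ij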
| 2,937 | 33.162791 | 113 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/Tensor.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
// Copyright (C) 2013 Christian Seiler <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H
namespace Eigen {
/** \class Tensor
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor class.
*
* The %Tensor class is the work-horse for all \em dense tensors within Eigen.
*
* The %Tensor class encompasses only dynamic-size objects so far.
*
* The first two template parameters are required:
* \tparam Scalar_ \anchor tensor_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
* User defined scalar types are supported as well (see \ref user_defined_scalars "here").
* \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
*
* The remaining template parameters are optional -- in most cases you don't have to worry about them.
* \tparam Options_ \anchor tensor_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
* \b #AutoAlign or \b #DontAlign.
* The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
* for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
* Support for such operations (i.e. adding two tensors etc.) is planned.
*
* You can access elements of tensors using normal subscripting:
*
* \code
* Eigen::Tensor<double, 4> t(10, 10, 10, 10);
* t(0, 1, 2, 3) = 42.0;
* \endcode
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
*
* <i><b>Some notes:</b></i>
*
* <dl>
* <dt><b>Relation to other parts of Eigen:</b></dt>
* <dd>The mid-term development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
* taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
* by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
* class does not provide any of these features and is only available as a stand-alone class that just allows for
* coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
* change dramatically.</dd>
* </dl>
*
* \ref TopicStorageOrders
*/
template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
public:
typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
typedef typename Eigen::internal::nested<Self>::type Nested;
typedef typename internal::traits<Self>::StorageKind StorageKind;
typedef typename internal::traits<Self>::Index Index;
typedef Scalar_ Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Base::CoeffReturnType CoeffReturnType;
enum {
IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
Layout = Options_ & RowMajor ? RowMajor : ColMajor,
CoordAccess = true,
RawAccess = true
};
static const int Options = Options_;
static const int NumIndices = NumIndices_;
typedef DSizes<Index, NumIndices_> Dimensions;
protected:
TensorStorage<Scalar, Dimensions, Options> m_storage;
#ifdef EIGEN_HAS_SFINAE
template<typename CustomIndices>
struct isOfNormalIndex{
static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
static const bool is_int = NumTraits<CustomIndices>::IsInteger;
static const bool value = is_array | is_int;
};
#endif
public:
// Metadata
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
// This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
// work, because that uses base().coeffRef() - and we don't yet
// implement a similar class hierarchy
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
}
// custom indices
#ifdef EIGEN_HAS_SFINAE
template<typename CustomIndices,
EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
{
return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
}
#endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return m_storage.data()[0];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
{
eigen_internal_assert(index >= 0 && index < size());
return m_storage.data()[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
}
// custom indices
#ifdef EIGEN_HAS_SFINAE
template<typename CustomIndices,
EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
{
return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
}
#endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return m_storage.data()[0];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
eigen_internal_assert(index >= 0 && index < size());
return m_storage.data()[index];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
{
return coeff(array<Index, 2>(i0, i1));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
{
return coeff(array<Index, 3>(i0, i1, i2));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
{
return coeff(array<Index, 4>(i0, i1, i2, i3));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
}
#endif
// custom indices
#ifdef EIGEN_HAS_SFINAE
template<typename CustomIndices,
EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
{
return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
return coeff(indices);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
{
eigen_internal_assert(index >= 0 && index < size());
return coeff(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
{
// The bracket operator is only for vectors, use the parenthesis operator instead.
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff(index);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
{
return coeffRef(array<Index, 2>(i0, i1));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
{
return coeffRef(array<Index, 3>(i0, i1, i2));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
{
return coeffRef(array<Index, 4>(i0, i1, i2, i3));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
}
#endif
// normal indices
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
{
return coeffRef(indices);
}
// custom indices
#ifdef EIGEN_HAS_SFINAE
template<typename CustomIndices,
EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
{
return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
}
#endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
{
eigen_assert(index >= 0 && index < size());
return coeffRef(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeffRef();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
{
// The bracket operator is only for vectors, use the parenthesis operator instead
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(index);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor()
: m_storage()
{
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor(const Self& other)
: m_storage(other.m_storage)
{
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
: m_storage(firstDimension, otherDimensions...)
{
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#else
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
: m_storage(dim1, array<Index, 1>(dim1))
{
EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
: m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
{
EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
: m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
{
EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
: m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
{
EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
: m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
{
EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#endif
/** Normal Dimension */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
: m_storage(internal::array_prod(dimensions), dimensions)
{
EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
{
typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
Assign assign(*this, other.derived());
resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
{
typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
Assign assign(*this, other.derived());
resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
{
typedef TensorAssignOp<Tensor, const Tensor> Assign;
Assign assign(*this, other);
resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
{
typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
Assign assign(*this, other);
resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
void resize(Index firstDimension, IndexTypes... otherDimensions)
{
// The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
}
#endif
    /** Resize from an explicit array of dimensions. */
EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
{
int i;
Index size = Index(1);
for (i = 0; i < NumIndices; i++) {
internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
size *= dimensions[i];
}
#ifdef EIGEN_INITIALIZE_COEFFS
bool size_changed = size != this->size();
m_storage.resize(size, dimensions);
if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#else
m_storage.resize(size, dimensions);
#endif
}
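    // Usage sketch (illustrative only; assumes a rank-2 float tensor): resizing
    // reallocates storage when the total size changes, so existing coefficients
    // are not preserved.
    //   Eigen::Tensor<float, 2> t(2, 2);
    //   t.resize(4, 8);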
    // Overload for DSizes. Although DSizes derives from array, this overload
    // copies the values into a plain array before forwarding to resize().
EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
array<Index, NumIndices> dims;
for (int i = 0; i < NumIndices; ++i) {
dims[i] = dimensions[i];
}
resize(dims);
}
EIGEN_DEVICE_FUNC
void resize()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
// Nothing to do: rank 0 tensors have fixed size
}
    /** Resize from a custom index type, converted internally to an array of dimensions. */
#ifdef EIGEN_HAS_SFINAE
template<typename CustomDimension,
EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
{
resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
}
#endif
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::ptrdiff_t... Indices>
EIGEN_DEVICE_FUNC
void resize(const Sizes<Indices...>& dimensions) {
array<Index, NumIndices> dims;
for (int i = 0; i < NumIndices; ++i) {
dims[i] = static_cast<Index>(dimensions[i]);
}
resize(dims);
}
#else
template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
EIGEN_DEVICE_FUNC
void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
array<Index, NumIndices> dims;
for (int i = 0; i < NumIndices; ++i) {
dims[i] = static_cast<Index>(dimensions[i]);
}
resize(dims);
}
#endif
protected:
bool checkIndexRange(const array<Index, NumIndices>& indices) const
{
using internal::array_apply_and_reduce;
using internal::array_zip_and_reduce;
using internal::greater_equal_zero_op;
using internal::logical_and_op;
using internal::lesser_op;
return
// check whether the indices are all >= 0
array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
// check whether the indices fit in the dimensions
array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
{
if (Options&RowMajor) {
return m_storage.dimensions().IndexOfRowMajor(indices);
} else {
return m_storage.dimensions().IndexOfColMajor(indices);
}
}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_H
| 20,614 | 38.043561 | 161 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Eugene Brevdo <[email protected]>
// Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
namespace Eigen {
namespace internal {
/** \class TensorIndexTuple
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Expression that pairs each coefficient of a tensor with its linear
  * index, producing values of type Tuple<Index, Scalar>.
  */
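// Usage sketch (illustrative only; index_tuples() on TensorBase builds this op):
//   Eigen::Tensor<float, 2> t(3, 4);
//   auto tuples = t.index_tuples();   // each coefficient becomes Tuple<Index, float>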
template<typename XprType>
struct traits<TensorIndexTupleOp<XprType> > : public traits<XprType>
{
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef Tuple<Index, typename XprTraits::Scalar> Scalar;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template<typename XprType>
struct eval<TensorIndexTupleOp<XprType>, Eigen::Dense>
{
typedef const TensorIndexTupleOp<XprType>& type;
};
template<typename XprType>
struct nested<TensorIndexTupleOp<XprType>, 1,
typename eval<TensorIndexTupleOp<XprType> >::type>
{
typedef TensorIndexTupleOp<XprType> type;
};
} // end namespace internal
template<typename XprType>
class TensorIndexTupleOp : public TensorBase<TensorIndexTupleOp<XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename Eigen::internal::nested<TensorIndexTupleOp>::type Nested;
typedef typename Eigen::internal::traits<TensorIndexTupleOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Index Index;
typedef Tuple<Index, typename XprType::CoeffReturnType> CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr)
: m_xpr(expr) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
};
// Eval as rvalue
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
{
typedef TensorIndexTupleOp<ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
static const int NumDims = internal::array_size<Dimensions>::value;
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
BlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device) { }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
return m_impl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return CoeffReturnType(index, m_impl.coeff(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
protected:
TensorEvaluator<ArgType, Device> m_impl;
};
namespace internal {
/** \class TensorTupleIndex
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Reduces a tensor of Tuple<Index, Scalar> values with the given
  * reducer and keeps only the index component, yielding a Tensor<Index>.
  */
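// Usage sketch (illustrative only; argmax()/argmin() on TensorBase build this op):
//   Eigen::Tensor<float, 3> t(2, 3, 4);
//   Eigen::Tensor<Eigen::DenseIndex, 2> am = t.argmax(1);  // indices of maxima along dim 1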
template<typename ReduceOp, typename Dims, typename XprType>
struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
{
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef Index Scalar;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
static const int Layout = XprTraits::Layout;
};
template<typename ReduceOp, typename Dims, typename XprType>
struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense>
{
typedef const TensorTupleReducerOp<ReduceOp, Dims, XprType>& type;
};
template<typename ReduceOp, typename Dims, typename XprType>
struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type>
{
typedef TensorTupleReducerOp<ReduceOp, Dims, XprType> type;
};
} // end namespace internal
template<typename ReduceOp, typename Dims, typename XprType>
class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Dims, XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename Eigen::internal::nested<TensorTupleReducerOp>::type Nested;
typedef typename Eigen::internal::traits<TensorTupleReducerOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Index Index;
typedef Index CoeffReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr,
const ReduceOp& reduce_op,
const int return_dim,
const Dims& reduce_dims)
: m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
EIGEN_DEVICE_FUNC
const ReduceOp& reduce_op() const { return m_reduce_op; }
EIGEN_DEVICE_FUNC
const Dims& reduce_dims() const { return m_reduce_dims; }
EIGEN_DEVICE_FUNC
int return_dim() const { return m_return_dim; }
protected:
typename XprType::Nested m_xpr;
const ReduceOp m_reduce_op;
const int m_return_dim;
const Dims m_reduce_dims;
};
// Eval as rvalue
template<typename ReduceOp, typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>
{
typedef TensorTupleReducerOp<ReduceOp, Dims, ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename TensorIndexTupleOp<ArgType>::CoeffReturnType TupleType;
typedef typename TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Dimensions Dimensions;
typedef typename TensorEvaluator<const TensorIndexTupleOp<ArgType> , Device>::Dimensions InputDimensions;
static const int NumDims = internal::array_size<InputDimensions>::value;
typedef array<Index, NumDims> StrideDims;
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
PacketAccess = /*TensorEvaluator<ArgType, Device>::PacketAccess*/ false,
BlockAccess = false,
Layout = TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_orig_impl(op.expression(), device),
m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
m_return_dim(op.return_dim()) {
gen_strides(m_orig_impl.dimensions(), m_strides);
if (Layout == static_cast<int>(ColMajor)) {
const Index total_size = internal::array_prod(m_orig_impl.dimensions());
m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size;
} else {
const Index total_size = internal::array_prod(m_orig_impl.dimensions());
m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size;
}
m_stride_div = m_strides[m_return_dim];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
return m_impl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
const TupleType v = m_impl.coeff(index);
return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double compute_cost = 1.0 +
(m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
return m_orig_impl.costPerCoeff(vectorized) +
m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost);
}
private:
EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) {
if (m_return_dim < 0) {
return; // Won't be using the strides.
}
eigen_assert(m_return_dim < NumDims &&
"Asking to convert index to a dimension outside of the rank");
    // Compute the strides of the input tensor; the constructor then uses them
    // to derive m_stride_div and m_stride_mod, which recover the coordinate of
    // a linear index along m_return_dim.
if (Layout == static_cast<int>(ColMajor)) {
strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
strides[i] = strides[i-1] * dims[i-1];
}
} else {
strides[NumDims-1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
strides[i] = strides[i+1] * dims[i+1];
}
}
}
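  // Worked example (ColMajor, dims = {2, 3, 4}): strides = {1, 2, 6}. With
  // m_return_dim == 1, m_stride_div == 2 and m_stride_mod == 6, so linear
  // index 11 maps to coordinate (11 % 6) / 2 == 2 along that dimension.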
protected:
TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
const int m_return_dim;
StrideDims m_strides;
Index m_stride_mod;
Index m_stride_div;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
| 11,022 | 35.743333 | 143 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
namespace Eigen {
/** \class TensorAssign
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor assignment class.
*
  * This class represents the assignment of the values resulting from the evaluation of
  * the rhs expression to the memory locations denoted by the lhs expression.
*/
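// Usage sketch (illustrative only): a TensorAssignOp is normally created
// implicitly by operator= on a tensor rather than constructed by hand.
//   Eigen::Tensor<float, 2> a(3, 3), b(3, 3);
//   b.setRandom();
//   a = b * b;   // builds an assignment expression evaluated by TensorExecutor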
namespace internal {
template<typename LhsXprType, typename RhsXprType>
struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
{
typedef typename LhsXprType::Scalar Scalar;
typedef typename traits<LhsXprType>::StorageKind StorageKind;
typedef typename promote_index_type<typename traits<LhsXprType>::Index,
typename traits<RhsXprType>::Index>::type Index;
typedef typename LhsXprType::Nested LhsNested;
typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
static const int Layout = internal::traits<LhsXprType>::Layout;
enum {
Flags = 0
};
};
template<typename LhsXprType, typename RhsXprType>
struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
{
typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
};
template<typename LhsXprType, typename RhsXprType>
struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
{
typedef TensorAssignOp<LhsXprType, RhsXprType> type;
};
} // end namespace internal
template<typename LhsXprType, typename RhsXprType>
class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
{
public:
typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
: m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
/** \returns the nested expressions */
EIGEN_DEVICE_FUNC
typename internal::remove_all<typename LhsXprType::Nested>::type&
lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
protected:
typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
};
template<typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
{
typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
m_leftImpl(op.lhsExpression(), device),
m_rightImpl(op.rhsExpression(), device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// The dimensions of the lhs and the rhs tensors should be equal to prevent
// overflows and ensure the result is fully initialized.
// TODO: use left impl instead if right impl dimensions are known at compile time.
return m_rightImpl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
m_leftImpl.evalSubExprsIfNeeded(NULL);
// If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non
// null value), attempt to evaluate the rhs expression in place. Returns true iff in place
// evaluation isn't supported and the caller still needs to manually assign the values generated
// by the rhs to the lhs.
return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_leftImpl.cleanup();
m_rightImpl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
}
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_leftImpl.coeff(index);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
{
return m_leftImpl.template packet<LoadMode>(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
// We assume that evalPacket or evalScalar is called to perform the
// assignment and account for the cost of the write here, but reduce left
// cost by one load because we are using m_leftImpl.coeffRef.
TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
return m_rightImpl.costPerCoeff(vectorized) +
TensorOpCost(
numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)),
left.bytes_stored(), left.compute_cycles()) +
TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
}
/// required by sycl in order to extract the accessor
const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_leftImpl.data(); }
private:
TensorEvaluator<LeftArgType, Device> m_leftImpl;
TensorEvaluator<RightArgType, Device> m_rightImpl;
};
}
#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
| 7,676 | 41.181319 | 189 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H
#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H
// clang-format off
namespace Eigen {
/** \class TensorBase
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor base class.
*
  * This class is the common parent of the Tensor and TensorMap classes, which
  * makes it possible to use either class interchangeably in expressions.
*/
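// Usage sketch (illustrative only): expressions built through TensorBase are
// lazy and only evaluated on assignment.
//   Eigen::Tensor<float, 2> t(2, 2);
//   t.setConstant(2.0f);
//   Eigen::Tensor<float, 2> u = (t + 2.0f).sqrt();   // evaluates to all 2s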
template<typename Derived>
class TensorBase<Derived, ReadOnlyAccessors>
{
public:
typedef internal::traits<Derived> DerivedTraits;
typedef typename DerivedTraits::Scalar Scalar;
typedef typename DerivedTraits::Index Index;
typedef typename internal::remove_const<Scalar>::type CoeffReturnType;
static const int NumDimensions = DerivedTraits::NumDimensions;
// Generic nullary operation support.
template <typename CustomNullaryOp> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<CustomNullaryOp, const Derived>
nullaryExpr(const CustomNullaryOp& func) const {
return TensorCwiseNullaryOp<CustomNullaryOp, const Derived>(derived(), func);
}
// Coefficient-wise nullary operators
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived>
constant(const Scalar& value) const {
return nullaryExpr(internal::scalar_constant_op<Scalar>(value));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::UniformRandomGenerator<Scalar>, const Derived>
random() const {
return nullaryExpr(internal::UniformRandomGenerator<Scalar>());
}
template <typename RandomGenerator> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<RandomGenerator, const Derived>
random(const RandomGenerator& gen = RandomGenerator()) const {
return nullaryExpr(gen);
}
// Tensor generation
template <typename Generator> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorGeneratorOp<Generator, const Derived>
generate(const Generator& generator) const {
return TensorGeneratorOp<Generator, const Derived>(derived(), generator);
}
// Generic unary operation support.
template <typename CustomUnaryOp> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<CustomUnaryOp, const Derived>
unaryExpr(const CustomUnaryOp& func) const {
return TensorCwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
}
// Coefficient-wise unary operators
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived>
operator-() const {
return unaryExpr(internal::scalar_opposite_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
sqrt() const {
return unaryExpr(internal::scalar_sqrt_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived>
sign() const {
return unaryExpr(internal::scalar_sign_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived>
rsqrt() const {
return unaryExpr(internal::scalar_rsqrt_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived>
square() const {
return unaryExpr(internal::scalar_square_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived>
cube() const {
return unaryExpr(internal::scalar_cube_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
inverse() const {
return unaryExpr(internal::scalar_inverse_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived>
tanh() const {
return unaryExpr(internal::scalar_tanh_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived>
lgamma() const {
return unaryExpr(internal::scalar_lgamma_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived>
digamma() const {
return unaryExpr(internal::scalar_digamma_op<Scalar>());
}
// igamma(a = this, x = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_igamma_op<Scalar>, const Derived, const OtherDerived>
igamma(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_igamma_op<Scalar>());
}
// igammac(a = this, x = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_igammac_op<Scalar>, const Derived, const OtherDerived>
igammac(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_igammac_op<Scalar>());
}
// zeta(x = this, q = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const OtherDerived>
zeta(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_zeta_op<Scalar>());
}
// polygamma(n = this, x = other)
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const Derived, const OtherDerived>
polygamma(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_polygamma_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived>
erf() const {
return unaryExpr(internal::scalar_erf_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived>
erfc() const {
return unaryExpr(internal::scalar_erfc_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sigmoid_op<Scalar>, const Derived>
sigmoid() const {
return unaryExpr(internal::scalar_sigmoid_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived>
exp() const {
return unaryExpr(internal::scalar_exp_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived>
log() const {
return unaryExpr(internal::scalar_log_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived>
log1p() const {
return unaryExpr(internal::scalar_log1p_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
abs() const {
return unaryExpr(internal::scalar_abs_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>
conjugate() const {
return unaryExpr(internal::scalar_conjugate_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_pow_op<Scalar,Scalar> >, const Derived>
pow(Scalar exponent) const {
return unaryExpr(internal::bind2nd_op<internal::scalar_pow_op<Scalar,Scalar> >(exponent));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>
real() const {
return unaryExpr(internal::scalar_real_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived>
imag() const {
return unaryExpr(internal::scalar_imag_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_sum_op<Scalar,Scalar> >, const Derived>
operator+ (Scalar rhs) const {
return unaryExpr(internal::bind2nd_op<internal::scalar_sum_op<Scalar,Scalar> >(rhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE friend
const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_sum_op<Scalar> >, const Derived>
operator+ (Scalar lhs, const Derived& rhs) {
return rhs.unaryExpr(internal::bind1st_op<internal::scalar_sum_op<Scalar> >(lhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_difference_op<Scalar,Scalar> >, const Derived>
operator- (Scalar rhs) const {
EIGEN_STATIC_ASSERT((NumTraits<Scalar>::IsSigned || internal::is_same<Scalar, const std::complex<float> >::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
return unaryExpr(internal::bind2nd_op<internal::scalar_difference_op<Scalar,Scalar> >(rhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE friend
const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_difference_op<Scalar> >, const Derived>
operator- (Scalar lhs, const Derived& rhs) {
return rhs.unaryExpr(internal::bind1st_op<internal::scalar_difference_op<Scalar> >(lhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_product_op<Scalar,Scalar> >, const Derived>
operator* (Scalar rhs) const {
return unaryExpr(internal::bind2nd_op<internal::scalar_product_op<Scalar,Scalar> >(rhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE friend
const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_product_op<Scalar> >, const Derived>
operator* (Scalar lhs, const Derived& rhs) {
return rhs.unaryExpr(internal::bind1st_op<internal::scalar_product_op<Scalar> >(lhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::bind2nd_op<internal::scalar_quotient_op<Scalar,Scalar> >, const Derived>
operator/ (Scalar rhs) const {
return unaryExpr(internal::bind2nd_op<internal::scalar_quotient_op<Scalar,Scalar> >(rhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE friend
const TensorCwiseUnaryOp<internal::bind1st_op<internal::scalar_quotient_op<Scalar> >, const Derived>
operator/ (Scalar lhs, const Derived& rhs) {
return rhs.unaryExpr(internal::bind1st_op<internal::scalar_quotient_op<Scalar> >(lhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_mod_op<Scalar>, const Derived>
operator% (Scalar rhs) const {
EIGEN_STATIC_ASSERT(NumTraits<Scalar>::IsInteger, YOU_MADE_A_PROGRAMMING_MISTAKE_TRY_MOD);
return unaryExpr(internal::scalar_mod_op<Scalar>(rhs));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
cwiseMax(Scalar threshold) const {
return cwiseMax(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
cwiseMin(Scalar threshold) const {
return cwiseMin(constant(threshold));
}
template <typename NewType> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorConversionOp<NewType, const Derived>
cast() const {
return TensorConversionOp<NewType, const Derived>(derived());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived>
round() const {
return unaryExpr(internal::scalar_round_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived>
ceil() const {
return unaryExpr(internal::scalar_ceil_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived>
floor() const {
return unaryExpr(internal::scalar_floor_op<Scalar>());
}
// Generic binary operation support.
template <typename CustomBinaryOp, typename OtherDerived> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const {
return TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other, func);
}
// Coefficient-wise binary operators.
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>
operator+(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_sum_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const Derived, const OtherDerived>
operator-(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_difference_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_product_op<Scalar>, const Derived, const OtherDerived>
operator*(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_product_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
operator/(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_quotient_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>
cwiseMax(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_max_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>
cwiseMin(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_min_op<Scalar>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
operator&&(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_boolean_and_op());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
operator||(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_boolean_or_op());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>
operator^(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_boolean_xor_op());
}
// Comparisons and tests.
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>, const Derived, const OtherDerived>
operator<(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>, const Derived, const OtherDerived>
operator<=(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>, const Derived, const OtherDerived>
operator>(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>, const Derived, const OtherDerived>
operator>=(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>, const Derived, const OtherDerived>
operator==(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>());
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>, const Derived, const OtherDerived>
operator!=(const OtherDerived& other) const {
return binaryExpr(other.derived(), internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>());
}
    // Comparisons and tests against scalar constants.
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LT>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator<(Scalar threshold) const {
return operator<(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_LE>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator<=(Scalar threshold) const {
return operator<=(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GT>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator>(Scalar threshold) const {
return operator>(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_GE>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator>=(Scalar threshold) const {
return operator>=(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_EQ>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator==(Scalar threshold) const {
return operator==(constant(threshold));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<internal::scalar_cmp_op<Scalar, Scalar, internal::cmp_NEQ>, const Derived, const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived> >
operator!=(Scalar threshold) const {
return operator!=(constant(threshold));
}
// Checks
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived>
(isnan)() const {
return unaryExpr(internal::scalar_isnan_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived>
(isinf)() const {
return unaryExpr(internal::scalar_isinf_op<Scalar>());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived>
(isfinite)() const {
return unaryExpr(internal::scalar_isfinite_op<Scalar>());
}
// Coefficient-wise ternary operators.
template<typename ThenDerived, typename ElseDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>
select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const {
return TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>(derived(), thenTensor.derived(), elseTensor.derived());
}
// Contractions.
typedef Eigen::IndexPair<Index> DimensionPair;
template<typename OtherDerived, typename Dimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorContractionOp<const Dimensions, const Derived, const OtherDerived>
contract(const OtherDerived& other, const Dimensions& dims) const {
return TensorContractionOp<const Dimensions, const Derived, const OtherDerived>(derived(), other.derived(), dims);
}
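    // Usage sketch (illustrative only): a matrix product expressed as a
    // contraction over one index pair.
    //   Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
    //   Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) };
    //   Eigen::Tensor<float, 2> c = a.contract(b, dims);   // 2x4 result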
// Convolutions.
template<typename KernelDerived, typename Dimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorConvolutionOp<const Dimensions, const Derived, const KernelDerived>
convolve(const KernelDerived& kernel, const Dimensions& dims) const {
return TensorConvolutionOp<const Dimensions, const Derived, const KernelDerived>(derived(), kernel.derived(), dims);
}
// Fourier transforms
template <int FFTDataType, int FFTDirection, typename FFT> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>
fft(const FFT& fft) const {
return TensorFFTOp<const FFT, const Derived, FFTDataType, FFTDirection>(derived(), fft);
}
// Scan.
typedef TensorScanOp<internal::SumReducer<CoeffReturnType>, const Derived> TensorScanSumOp;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorScanSumOp
cumsum(const Index& axis, bool exclusive = false) const {
return TensorScanSumOp(derived(), axis, exclusive);
}
typedef TensorScanOp<internal::ProdReducer<CoeffReturnType>, const Derived> TensorScanProdOp;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorScanProdOp
cumprod(const Index& axis, bool exclusive = false) const {
return TensorScanProdOp(derived(), axis, exclusive);
}
template <typename Reducer>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorScanOp<Reducer, const Derived>
scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const {
return TensorScanOp<Reducer, const Derived>(derived(), axis, exclusive, reducer);
}
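    // Usage sketch (illustrative only): running totals along one dimension.
    //   Eigen::Tensor<float, 2> t(2, 3);
    //   t.setConstant(1.0f);
    //   Eigen::Tensor<float, 2> cs = t.cumsum(1);   // each row becomes {1, 2, 3}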
// Reductions.
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>
sum(const Dims& dims) const {
return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::SumReducer<CoeffReturnType>());
}
const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
sum() const {
DimensionList<Index, NumDimensions> in_dims;
return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::SumReducer<CoeffReturnType>());
}
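    // Usage sketch (illustrative only): reducing over a subset of dimensions
    // removes them from the result.
    //   Eigen::Tensor<float, 3> t(2, 3, 4);
    //   Eigen::array<Eigen::Index, 1> d = {1};
    //   Eigen::Tensor<float, 2> s = t.sum(d);   // shape (2, 4)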
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>
mean(const Dims& dims) const {
return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MeanReducer<CoeffReturnType>());
}
const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
mean() const {
DimensionList<Index, NumDimensions> in_dims;
return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MeanReducer<CoeffReturnType>());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>
prod(const Dims& dims) const {
return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::ProdReducer<CoeffReturnType>());
}
const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
prod() const {
DimensionList<Index, NumDimensions> in_dims;
return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::ProdReducer<CoeffReturnType>());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>
maximum(const Dims& dims) const {
return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MaxReducer<CoeffReturnType>());
}
const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
maximum() const {
DimensionList<Index, NumDimensions> in_dims;
return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MaxReducer<CoeffReturnType>());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>
minimum(const Dims& dims) const {
return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MinReducer<CoeffReturnType>());
}
const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>
minimum() const {
DimensionList<Index, NumDimensions> in_dims;
return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const DimensionList<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MinReducer<CoeffReturnType>());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::AndReducer, const Dims, const TensorConversionOp<bool, const Derived> >
all(const Dims& dims) const {
return cast<bool>().reduce(dims, internal::AndReducer());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::AndReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
all() const {
DimensionList<Index, NumDimensions> in_dims;
return cast<bool>().reduce(in_dims, internal::AndReducer());
}
template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::OrReducer, const Dims, const TensorConversionOp<bool, const Derived> >
any(const Dims& dims) const {
return cast<bool>().reduce(dims, internal::OrReducer());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<internal::OrReducer, const DimensionList<Index, NumDimensions>, const TensorConversionOp<bool, const Derived> >
any() const {
DimensionList<Index, NumDimensions> in_dims;
return cast<bool>().reduce(in_dims, internal::OrReducer());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>, const Derived>
argmax() const {
array<Index, NumDimensions> in_dims;
for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
return TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>,
const Derived>(derived(), internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >(), -1, in_dims);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>, const Derived>
argmin() const {
array<Index, NumDimensions> in_dims;
for (int d = 0; d < NumDimensions; ++d) in_dims[d] = d;
return TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, NumDimensions>,
const Derived>(derived(), internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >(), -1, in_dims);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>, const Derived>
argmax(const int return_dim) const {
array<Index, 1> in_dims;
in_dims[0] = return_dim;
return TensorTupleReducerOp<
internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>,
const Derived>(derived(), internal::ArgMaxTupleReducer<Tuple<Index, CoeffReturnType> >(), return_dim, in_dims);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>, const Derived>
argmin(const int return_dim) const {
array<Index, 1> in_dims;
in_dims[0] = return_dim;
return TensorTupleReducerOp<
internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >,
const array<Index, 1>,
const Derived>(derived(), internal::ArgMinTupleReducer<Tuple<Index, CoeffReturnType> >(), return_dim, in_dims);
}
template <typename Reducer, typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReductionOp<Reducer, const Dims, const Derived>
reduce(const Dims& dims, const Reducer& reducer) const {
return TensorReductionOp<Reducer, const Dims, const Derived>(derived(), dims, reducer);
}
template <typename Broadcast> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorBroadcastingOp<const Broadcast, const Derived>
broadcast(const Broadcast& broadcast) const {
return TensorBroadcastingOp<const Broadcast, const Derived>(derived(), broadcast);
}
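    // Usage sketch (illustrative only): broadcast factors multiply each
    // dimension, tiling the contents.
    //   Eigen::Tensor<float, 2> t(2, 3);
    //   Eigen::array<Eigen::Index, 2> bcast = {2, 1};
    //   Eigen::Tensor<float, 2> big = t.broadcast(bcast);   // shape (4, 3)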
template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorConcatenationOp<Axis, const Derived, const OtherDerived>
concatenate(const OtherDerived& other, Axis axis) const {
return TensorConcatenationOp<Axis, const Derived, const OtherDerived>(derived(), other.derived(), axis);
}
template <typename PatchDims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorPatchOp<const PatchDims, const Derived>
extract_patches(const PatchDims& patch_dims) const {
return TensorPatchOp<const PatchDims, const Derived>(derived(), patch_dims);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
extract_image_patches(const Index patch_rows = 1, const Index patch_cols = 1,
const Index row_stride = 1, const Index col_stride = 1,
const Index in_row_stride = 1, const Index in_col_stride = 1,
const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const {
return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
in_row_stride, in_col_stride, 1, 1, padding_type, padding_value);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
extract_image_patches(const Index patch_rows, const Index patch_cols,
const Index row_stride, const Index col_stride,
const Index in_row_stride, const Index in_col_stride,
const Index row_inflate_stride, const Index col_inflate_stride,
const Index padding_top, const Index padding_bottom,
const Index padding_left,const Index padding_right,
const Scalar padding_value) const {
return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
in_row_stride, in_col_stride, row_inflate_stride, col_inflate_stride,
padding_top, padding_bottom, padding_left, padding_right, padding_value);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>
extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols,
const Index plane_stride = 1, const Index row_stride = 1, const Index col_stride = 1,
const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const {
return TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, 1, 1, 1, padding_type, padding_value);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>
extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols,
const Index plane_stride, const Index row_stride, const Index col_stride,
const Index plane_inflate_stride, const Index row_inflate_stride, const Index col_inflate_stride,
const Index padding_top_z, const Index padding_bottom_z,
const Index padding_top, const Index padding_bottom,
const Index padding_left, const Index padding_right, const Scalar padding_value = Scalar(0)) const {
return TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Derived>(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, plane_inflate_stride, row_inflate_stride, col_inflate_stride, padding_top_z, padding_bottom_z, padding_top, padding_bottom, padding_left, padding_right, padding_value);
}
// Morphing operators.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorLayoutSwapOp<const Derived>
swap_layout() const {
return TensorLayoutSwapOp<const Derived>(derived());
}
template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReshapingOp<const NewDimensions, const Derived>
reshape(const NewDimensions& newDimensions) const {
return TensorReshapingOp<const NewDimensions, const Derived>(derived(), newDimensions);
}
template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorSlicingOp<const StartIndices, const Sizes, const Derived>
slice(const StartIndices& startIndices, const Sizes& sizes) const {
return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
}
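    // Usage sketch (illustrative only): take a 2x2 window starting at row 1.
    //   Eigen::Tensor<float, 2> t(4, 3);
    //   Eigen::array<Eigen::Index, 2> offsets = {1, 0};
    //   Eigen::array<Eigen::Index, 2> extents = {2, 2};
    //   Eigen::Tensor<float, 2> s = t.slice(offsets, extents);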
template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, const Derived>
stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const {
return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
const Derived>(derived(), startIndices, stopIndices, strides);
}
template <Index DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorChippingOp<DimId, const Derived>
chip(const Index offset) const {
return TensorChippingOp<DimId, const Derived>(derived(), offset, DimId);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorChippingOp<Dynamic, const Derived>
chip(const Index offset, const Index dim) const {
return TensorChippingOp<Dynamic, const Derived>(derived(), offset, dim);
}
template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReverseOp<const ReverseDimensions, const Derived>
reverse(const ReverseDimensions& rev) const {
return TensorReverseOp<const ReverseDimensions, const Derived>(derived(), rev);
}
template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorPaddingOp<const PaddingDimensions, const Derived>
pad(const PaddingDimensions& padding) const {
return TensorPaddingOp<const PaddingDimensions, const Derived>(derived(), padding, internal::scalar_cast_op<int, Scalar>()(0));
}
template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorPaddingOp<const PaddingDimensions, const Derived>
pad(const PaddingDimensions& padding, const Scalar padding_value) const {
return TensorPaddingOp<const PaddingDimensions, const Derived>(derived(), padding, padding_value);
}
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorShufflingOp<const Shuffle, const Derived>
shuffle(const Shuffle& shuffle) const {
return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorStridingOp<const Strides, const Derived>
stride(const Strides& strides) const {
return TensorStridingOp<const Strides, const Derived>(derived(), strides);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorInflationOp<const Strides, const Derived>
inflate(const Strides& strides) const {
return TensorInflationOp<const Strides, const Derived>(derived(), strides);
}
// Returns a tensor containing index/value tuples
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorIndexTupleOp<const Derived>
index_tuples() const {
return TensorIndexTupleOp<const Derived>(derived());
}
// Support for custom unary and binary operations
template <typename CustomUnaryFunc>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCustomUnaryOp<const CustomUnaryFunc, const Derived> customOp(const CustomUnaryFunc& op) const {
return TensorCustomUnaryOp<const CustomUnaryFunc, const Derived>(derived(), op);
}
template <typename OtherDerived, typename CustomBinaryFunc>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorCustomBinaryOp<const CustomBinaryFunc, const Derived, const OtherDerived> customOp(const OtherDerived& other, const CustomBinaryFunc& op) const {
return TensorCustomBinaryOp<const CustomBinaryFunc, const Derived, const OtherDerived>(derived(), other, op);
}
// Force the evaluation of the expression.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorForcedEvalOp<const Derived> eval() const {
return TensorForcedEvalOp<const Derived>(derived());
}
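    // Illustrative sketch (editorial addition): eval() materializes the
    // expression into a temporary buffer, so a shared subexpression is
    // computed only once.
    //   auto c = (a + b).eval();            // forced evaluation of a + b
    //   Eigen::Tensor<float, 2> d = c * c;  // reuses the evaluated result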
protected:
template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
template <typename OtherDerived, int AccessLevel> friend class TensorBase;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
};
template<typename Derived, int AccessLevel = internal::accessors_level<Derived>::value>
class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
public:
typedef internal::traits<Derived> DerivedTraits;
typedef typename DerivedTraits::Scalar Scalar;
typedef typename DerivedTraits::Index Index;
typedef Scalar CoeffReturnType;
static const int NumDimensions = DerivedTraits::NumDimensions;
template <typename Scalar, int NumIndices, int Options, typename IndexType> friend class Tensor;
template <typename Scalar, typename Dimensions, int Option, typename IndexTypes> friend class TensorFixedSize;
template <typename OtherDerived, int OtherAccessLevel> friend class TensorBase;
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setZero() {
return setConstant(Scalar(0));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setConstant(const Scalar& val) {
return derived() = this->constant(val);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setRandom() {
return derived() = this->random();
}
template <typename RandomGenerator> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setRandom() {
return derived() = this->template random<RandomGenerator>();
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& setValues(
const typename internal::Initializer<Derived, NumDimensions>::InitList& vals) {
TensorEvaluator<Derived, DefaultDevice> eval(derived(), DefaultDevice());
internal::initialize_tensor<Derived, NumDimensions>(eval, vals);
return derived();
}
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
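  // Illustrative sketch (editorial addition), assuming a compiler where
  // EIGEN_HAS_VARIADIC_TEMPLATES is enabled:
  //   Eigen::Tensor<int, 2> t(2, 2);
  //   t.setValues({{1, 2}, {3, 4}});  // one nesting level per tensor rank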
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator+=(const OtherDerived& other) {
return derived() = derived() + other.derived();
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator-=(const OtherDerived& other) {
return derived() = derived() - other.derived();
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator*=(const OtherDerived& other) {
return derived() = derived() * other.derived();
}
template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Derived& operator/=(const OtherDerived& other) {
return derived() = derived() / other.derived();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorLayoutSwapOp<const Derived>
swap_layout() const {
return TensorLayoutSwapOp<const Derived>(derived());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorLayoutSwapOp<Derived>
swap_layout() {
return TensorLayoutSwapOp<Derived>(derived());
}
template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorConcatenationOp<const Axis, const Derived, const OtherDerived>
concatenate(const OtherDerived& other, const Axis& axis) const {
return TensorConcatenationOp<const Axis, const Derived, const OtherDerived>(derived(), other, axis);
}
template <typename Axis, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorConcatenationOp<const Axis, Derived, OtherDerived>
concatenate(const OtherDerived& other, const Axis& axis) {
return TensorConcatenationOp<const Axis, Derived, OtherDerived>(derived(), other, axis);
}
template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReshapingOp<const NewDimensions, const Derived>
reshape(const NewDimensions& newDimensions) const {
return TensorReshapingOp<const NewDimensions, const Derived>(derived(), newDimensions);
}
template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorReshapingOp<const NewDimensions, Derived>
reshape(const NewDimensions& newDimensions) {
return TensorReshapingOp<const NewDimensions, Derived>(derived(), newDimensions);
}
template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorSlicingOp<const StartIndices, const Sizes, const Derived>
slice(const StartIndices& startIndices, const Sizes& sizes) const {
return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
}
template <typename StartIndices, typename Sizes> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorSlicingOp<const StartIndices, const Sizes, Derived>
slice(const StartIndices& startIndices, const Sizes& sizes) {
return TensorSlicingOp<const StartIndices, const Sizes, Derived>(derived(), startIndices, sizes);
}
template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, const Derived>
stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const {
return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
const Derived>(derived(), startIndices, stopIndices, strides);
}
template <typename StartIndices, typename StopIndices, typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides, Derived>
stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) {
return TensorStridingSlicingOp<const StartIndices, const StopIndices, const Strides,
Derived>(derived(), startIndices, stopIndices, strides);
}
template <DenseIndex DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorChippingOp<DimId, const Derived>
chip(const Index offset) const {
return TensorChippingOp<DimId, const Derived>(derived(), offset, DimId);
}
template <Index DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorChippingOp<DimId, Derived>
chip(const Index offset) {
return TensorChippingOp<DimId, Derived>(derived(), offset, DimId);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorChippingOp<Dynamic, const Derived>
chip(const Index offset, const Index dim) const {
return TensorChippingOp<Dynamic, const Derived>(derived(), offset, dim);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorChippingOp<Dynamic, Derived>
chip(const Index offset, const Index dim) {
return TensorChippingOp<Dynamic, Derived>(derived(), offset, dim);
}
template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorReverseOp<const ReverseDimensions, const Derived>
reverse(const ReverseDimensions& rev) const {
return TensorReverseOp<const ReverseDimensions, const Derived>(derived(), rev);
}
template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorReverseOp<const ReverseDimensions, Derived>
reverse(const ReverseDimensions& rev) {
return TensorReverseOp<const ReverseDimensions, Derived>(derived(), rev);
}
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorShufflingOp<const Shuffle, const Derived>
shuffle(const Shuffle& shuffle) const {
return TensorShufflingOp<const Shuffle, const Derived>(derived(), shuffle);
}
template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorShufflingOp<const Shuffle, Derived>
shuffle(const Shuffle& shuffle) {
return TensorShufflingOp<const Shuffle, Derived>(derived(), shuffle);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const TensorStridingOp<const Strides, const Derived>
stride(const Strides& strides) const {
return TensorStridingOp<const Strides, const Derived>(derived(), strides);
}
template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorStridingOp<const Strides, Derived>
stride(const Strides& strides) {
return TensorStridingOp<const Strides, Derived>(derived(), strides);
}
// Select the device on which to evaluate the expression.
template <typename DeviceType>
TensorDevice<Derived, DeviceType> device(const DeviceType& device) {
return TensorDevice<Derived, DeviceType>(device, derived());
}
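    // Illustrative sketch (editorial addition): device() selects where the
    // assignment is evaluated; the device object must outlive the assignment.
    // Assumes the ThreadPool support header is available.
    //   Eigen::ThreadPool pool(4);
    //   Eigen::ThreadPoolDevice dev(&pool, 4);
    //   c.device(dev) = a + b;  // evaluated on 4 threads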
protected:
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Derived& derived() { return *static_cast<Derived*>(this); }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H
| 49,473 | 47.935707 | 339 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
#define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
namespace Eigen {
/** \class TensorBroadcasting
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor broadcasting class.
*
*
*/
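// Illustrative usage sketch (editorial addition): each broadcast factor
// multiplies the corresponding input dimension.
//   Eigen::Tensor<float, 2> a(2, 3);
//   Eigen::array<int, 2> bcast{{2, 2}};
//   Eigen::Tensor<float, 2> b = a.broadcast(bcast);  // b is 4 x 6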
namespace internal {
template<typename Broadcast, typename XprType>
struct traits<TensorBroadcastingOp<Broadcast, XprType> > : public traits<XprType>
{
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template<typename Broadcast, typename XprType>
struct eval<TensorBroadcastingOp<Broadcast, XprType>, Eigen::Dense>
{
typedef const TensorBroadcastingOp<Broadcast, XprType>& type;
};
template<typename Broadcast, typename XprType>
struct nested<TensorBroadcastingOp<Broadcast, XprType>, 1, typename eval<TensorBroadcastingOp<Broadcast, XprType> >::type>
{
typedef TensorBroadcastingOp<Broadcast, XprType> type;
};
template <typename Dims>
struct is_input_scalar {
static const bool value = false;
};
template <>
struct is_input_scalar<Sizes<> > {
static const bool value = true;
};
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::size_t... Indices>
struct is_input_scalar<Sizes<Indices...> > {
static const bool value = (Sizes<Indices...>::total_size == 1);
};
#endif
} // end namespace internal
template<typename Broadcast, typename XprType>
class TensorBroadcastingOp : public TensorBase<TensorBroadcastingOp<Broadcast, XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorBroadcastingOp>::type Nested;
typedef typename Eigen::internal::traits<TensorBroadcastingOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast)
: m_xpr(expr), m_broadcast(broadcast) {}
EIGEN_DEVICE_FUNC
const Broadcast& broadcast() const { return m_broadcast; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
const Broadcast m_broadcast;
};
// Eval as rvalue
template<typename Broadcast, typename ArgType, typename Device>
struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
{
typedef TensorBroadcastingOp<Broadcast, ArgType> XprType;
typedef typename XprType::Index Index;
static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = true,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_broadcast(op.broadcast()),m_impl(op.expression(), device)
{
    // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar
    // and store the result in a scalar. Instead one should first reshape the scalar into an
    // N-D tensor (N >= 1) with a single element, and then broadcast that.
EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
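    // Illustrative sketch of that workaround (editorial addition; types and
    // names are for exposition only):
    //   Eigen::Tensor<float, 0> s;                // rank-0 scalar
    //   Eigen::array<int, 1> one{{1}}, five{{5}};
    //   auto v = s.reshape(one).broadcast(five);  // rank-1, 5 elements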
const InputDimensions& input_dims = m_impl.dimensions();
const Broadcast& broadcast = op.broadcast();
for (int i = 0; i < NumDims; ++i) {
eigen_assert(input_dims[i] > 0);
m_dimensions[i] = input_dims[i] * broadcast[i];
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_inputStrides[0] = 1;
m_outputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
}
} else {
m_inputStrides[NumDims-1] = 1;
m_outputStrides[NumDims-1] = 1;
for (int i = NumDims-2; i >= 0; --i) {
m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
}
}
}
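  // Worked example (editorial addition): for a ColMajor 2x3 input broadcast by
  // {2, 2}, m_dimensions = {4, 6}, m_inputStrides = {1, 2} and
  // m_outputStrides = {1, 4}.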
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const
{
if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) {
return m_impl.coeff(0);
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
return coeffColMajor(index);
} else {
return coeffRowMajor(index);
}
}
// TODO: attempt to speed this up. The integer divisions and modulo are slow
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const
{
Index inputIndex = 0;
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i];
if (internal::index_statically_eq<Broadcast>(i, 1)) {
eigen_assert(idx < m_impl.dimensions()[i]);
inputIndex += idx * m_inputStrides[i];
} else {
if (internal::index_statically_eq<InputDimensions>(i, 1)) {
eigen_assert(idx % m_impl.dimensions()[i] == 0);
} else {
inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
}
}
index -= idx * m_outputStrides[i];
}
if (internal::index_statically_eq<Broadcast>(0, 1)) {
eigen_assert(index < m_impl.dimensions()[0]);
inputIndex += index;
} else {
if (internal::index_statically_eq<InputDimensions>(0, 1)) {
eigen_assert(index % m_impl.dimensions()[0] == 0);
} else {
inputIndex += (index % m_impl.dimensions()[0]);
}
}
return m_impl.coeff(inputIndex);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const
{
Index inputIndex = 0;
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i];
if (internal::index_statically_eq<Broadcast>(i, 1)) {
eigen_assert(idx < m_impl.dimensions()[i]);
inputIndex += idx * m_inputStrides[i];
} else {
if (internal::index_statically_eq<InputDimensions>(i, 1)) {
eigen_assert(idx % m_impl.dimensions()[i] == 0);
} else {
inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
}
}
index -= idx * m_outputStrides[i];
}
if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) {
eigen_assert(index < m_impl.dimensions()[NumDims-1]);
inputIndex += index;
} else {
if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) {
eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
} else {
inputIndex += (index % m_impl.dimensions()[NumDims-1]);
}
}
return m_impl.coeff(inputIndex);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const
{
if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) {
return internal::pset1<PacketReturnType>(m_impl.coeff(0));
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
return packetColMajor<LoadMode>(index);
} else {
return packetRowMajor<LoadMode>(index);
}
}
// Ignore the LoadMode and always use unaligned loads since we can't guarantee
// the alignment at compile time.
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
const Index originalIndex = index;
Index inputIndex = 0;
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i];
if (internal::index_statically_eq<Broadcast>(i, 1)) {
eigen_assert(idx < m_impl.dimensions()[i]);
inputIndex += idx * m_inputStrides[i];
} else {
if (internal::index_statically_eq<InputDimensions>(i, 1)) {
eigen_assert(idx % m_impl.dimensions()[i] == 0);
} else {
inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
}
}
index -= idx * m_outputStrides[i];
}
Index innermostLoc;
if (internal::index_statically_eq<Broadcast>(0, 1)) {
eigen_assert(index < m_impl.dimensions()[0]);
innermostLoc = index;
} else {
if (internal::index_statically_eq<InputDimensions>(0, 1)) {
eigen_assert(index % m_impl.dimensions()[0] == 0);
innermostLoc = 0;
} else {
innermostLoc = index % m_impl.dimensions()[0];
}
}
inputIndex += innermostLoc;
    // TODO: this could be extended to the second dimension if we're not
    // broadcasting along the first dimension, and so on.
if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) {
return m_impl.template packet<Unaligned>(inputIndex);
} else {
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
values[0] = m_impl.coeff(inputIndex);
for (int i = 1; i < PacketSize; ++i) {
values[i] = coeffColMajor(originalIndex+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
const Index originalIndex = index;
Index inputIndex = 0;
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i];
if (internal::index_statically_eq<Broadcast>(i, 1)) {
eigen_assert(idx < m_impl.dimensions()[i]);
inputIndex += idx * m_inputStrides[i];
} else {
if (internal::index_statically_eq<InputDimensions>(i, 1)) {
eigen_assert(idx % m_impl.dimensions()[i] == 0);
} else {
inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
}
}
index -= idx * m_outputStrides[i];
}
Index innermostLoc;
if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) {
eigen_assert(index < m_impl.dimensions()[NumDims-1]);
innermostLoc = index;
} else {
if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) {
eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
innermostLoc = 0;
} else {
innermostLoc = index % m_impl.dimensions()[NumDims-1];
}
}
inputIndex += innermostLoc;
    // TODO: this could be extended to the second dimension if we're not
    // broadcasting along the first dimension, and so on.
if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) {
return m_impl.template packet<Unaligned>(inputIndex);
} else {
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
values[0] = m_impl.coeff(inputIndex);
for (int i = 1; i < PacketSize; ++i) {
values[i] = coeffRowMajor(originalIndex+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
double compute_cost = TensorOpCost::AddCost<Index>();
if (NumDims > 0) {
for (int i = NumDims - 1; i > 0; --i) {
compute_cost += TensorOpCost::DivCost<Index>();
if (internal::index_statically_eq<Broadcast>(i, 1)) {
compute_cost +=
TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
} else {
if (!internal::index_statically_eq<InputDimensions>(i, 1)) {
compute_cost += TensorOpCost::MulCost<Index>() +
TensorOpCost::ModCost<Index>() +
TensorOpCost::AddCost<Index>();
}
}
compute_cost +=
TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
}
}
return m_impl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
Broadcast functor() const { return m_broadcast; }
protected:
const Broadcast m_broadcast;
Dimensions m_dimensions;
array<Index, NumDims> m_outputStrides;
array<Index, NumDims> m_inputStrides;
TensorEvaluator<ArgType, Device> m_impl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
| 14,286 | 35.35369 | 122 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
namespace Eigen {
/** \class TensorChippingOp
* \ingroup CXX11_Tensor_Module
*
* \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor.
*
*
*/
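// Illustrative usage sketch (editorial addition): chipping removes one
// dimension at a fixed offset.
//   Eigen::Tensor<float, 3> t(2, 3, 5);
//   Eigen::Tensor<float, 2> c1 = t.chip<2>(1);  // 2 x 3 slice at offset 1 of dim 2
//   Eigen::Tensor<float, 2> c2 = t.chip(1, 2);  // same, with a runtime dimension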
namespace internal {
template<DenseIndex DimId, typename XprType>
struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
{
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions - 1;
static const int Layout = XprTraits::Layout;
};
template<DenseIndex DimId, typename XprType>
struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
{
typedef const TensorChippingOp<DimId, XprType>& type;
};
template<DenseIndex DimId, typename XprType>
struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
{
typedef TensorChippingOp<DimId, XprType> type;
};
template <DenseIndex DimId>
struct DimensionId
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) {
eigen_assert(dim == DimId);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
return DimId;
}
};
template <>
struct DimensionId<Dynamic>
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) {
eigen_assert(dim >= 0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
return actual_dim;
}
private:
const DenseIndex actual_dim;
};
} // end namespace internal
template<DenseIndex DimId, typename XprType>
class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
{
public:
typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim)
: m_xpr(expr), m_offset(offset), m_dim(dim) {
}
EIGEN_DEVICE_FUNC
const Index offset() const { return m_offset; }
EIGEN_DEVICE_FUNC
const Index dim() const { return m_dim.actualDim(); }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorChippingOp& operator = (const TensorChippingOp& other)
{
typedef TensorAssignOp<TensorChippingOp, const TensorChippingOp> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other)
{
typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
protected:
typename XprType::Nested m_xpr;
const Index m_offset;
const internal::DimensionId<DimId> m_dim;
};
// Eval as rvalue
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
typedef TensorChippingOp<DimId, ArgType> XprType;
static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
static const int NumDims = NumInputDims-1;
typedef typename XprType::Index Index;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
// Alignment can't be guaranteed at compile time since it depends on the
// slice offsets.
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
{
EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
eigen_assert(NumInputDims > m_dim.actualDim());
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
eigen_assert(op.offset() < input_dims[m_dim.actualDim()]);
int j = 0;
for (int i = 0; i < NumInputDims; ++i) {
if (i != m_dim.actualDim()) {
m_dimensions[j] = input_dims[i];
++j;
}
}
m_stride = 1;
m_inputStride = 1;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = 0; i < m_dim.actualDim(); ++i) {
m_stride *= input_dims[i];
m_inputStride *= input_dims[i];
}
} else {
for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
m_stride *= input_dims[i];
m_inputStride *= input_dims[i];
}
}
m_inputStride *= input_dims[m_dim.actualDim()];
m_inputOffset = m_stride * op.offset();
}
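  // Worked example (editorial addition): chipping dimension 1 at offset 2 of a
  // ColMajor 2x3x5 input gives m_dimensions = {2, 5}, m_stride = 2,
  // m_inputStride = 6 and m_inputOffset = 4.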
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_impl.coeff(srcCoeff(index));
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
// m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(m_stride == 1);
Index inputIndex = index * m_inputStride + m_inputOffset;
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
for (int i = 0; i < PacketSize; ++i) {
values[i] = m_impl.coeff(inputIndex);
inputIndex += m_inputStride;
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(m_stride > index);
return m_impl.template packet<LoadMode>(index + m_inputOffset);
} else {
const Index idx = index / m_stride;
const Index rem = index - idx * m_stride;
if (rem + PacketSize <= m_stride) {
Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
return m_impl.template packet<LoadMode>(inputIndex);
} else {
        // Crossing the stride boundary. Fall back to the slow path.
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
for (int i = 0; i < PacketSize; ++i) {
values[i] = coeff(index);
++index;
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
double cost = 0;
if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
m_dim.actualDim() == 0) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
m_dim.actualDim() == NumInputDims - 1)) {
cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
m_dim.actualDim() == NumInputDims - 1) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
m_dim.actualDim() == 0)) {
cost += TensorOpCost::AddCost<Index>();
} else {
cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() +
3 * TensorOpCost::AddCost<Index>();
}
return m_impl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, cost, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data());
if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) &&
result) {
return result + m_inputOffset;
} else {
return NULL;
}
}
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
{
Index inputIndex;
if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
// m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(m_stride == 1);
inputIndex = index * m_inputStride + m_inputOffset;
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(m_stride > index);
inputIndex = index + m_inputOffset;
} else {
const Index idx = index / m_stride;
inputIndex = idx * m_inputStride + m_inputOffset;
index -= idx * m_stride;
inputIndex += index;
}
return inputIndex;
}
Dimensions m_dimensions;
Index m_stride;
Index m_inputOffset;
Index m_inputStride;
TensorEvaluator<ArgType, Device> m_impl;
const internal::DimensionId<DimId> m_dim;
const Device& m_device;
};
// Eval as lvalue
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
: public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
typedef TensorChippingOp<DimId, ArgType> XprType;
static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
static const int NumDims = NumInputDims-1;
typedef typename XprType::Index Index;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device)
{ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
{
return this->m_impl.coeffRef(this->srcCoeff(index));
}
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == 0) ||
(static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) {
// m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(this->m_stride == 1);
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
for (int i = 0; i < PacketSize; ++i) {
this->m_impl.coeffRef(inputIndex) = values[i];
inputIndex += this->m_inputStride;
}
} else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
(static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
      // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(this->m_stride > index);
this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
} else {
const Index idx = index / this->m_stride;
const Index rem = index - idx * this->m_stride;
if (rem + PacketSize <= this->m_stride) {
const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
this->m_impl.template writePacket<StoreMode>(inputIndex, x);
} else {
        // Crossing the stride boundary. Fall back to the slow path.
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
for (int i = 0; i < PacketSize; ++i) {
this->coeffRef(index) = values[i];
++index;
}
}
}
}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
| 14,755 | 37.327273 | 125 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
namespace Eigen {
/** \class TensorConcatenationOp
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor concatenation class.
*
*
*/
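// Illustrative usage sketch (editorial addition): all dimensions except the
// concatenation axis must match.
//   Eigen::Tensor<float, 2> a(2, 3), b(2, 5);
//   Eigen::Tensor<float, 2> c = a.concatenate(b, 1);  // c is 2 x 8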
namespace internal {
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename promote_storage_type<typename LhsXprType::Scalar,
typename RhsXprType::Scalar>::ret Scalar;
typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
typename traits<RhsXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<LhsXprType>::Index,
typename traits<RhsXprType>::Index>::type Index;
typedef typename LhsXprType::Nested LhsNested;
typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<LhsXprType>::NumDimensions;
static const int Layout = traits<LhsXprType>::Layout;
enum { Flags = 0 };
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, Eigen::Dense>
{
typedef const TensorConcatenationOp<Axis, LhsXprType, RhsXprType>& type;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct nested<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, 1, typename eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >::type>
{
typedef TensorConcatenationOp<Axis, LhsXprType, RhsXprType> type;
};
} // end namespace internal
template<typename Axis, typename LhsXprType, typename RhsXprType>
class TensorConcatenationOp : public TensorBase<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, WriteAccessors>
{
public:
typedef typename internal::traits<TensorConcatenationOp>::Scalar Scalar;
typedef typename internal::traits<TensorConcatenationOp>::StorageKind StorageKind;
typedef typename internal::traits<TensorConcatenationOp>::Index Index;
typedef typename internal::nested<TensorConcatenationOp>::type Nested;
typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis)
: m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename LhsXprType::Nested>::type&
lhsExpression() const { return m_lhs_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const TensorConcatenationOp& other)
{
typedef TensorAssignOp<TensorConcatenationOp, const TensorConcatenationOp> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const OtherDerived& other)
{
typedef TensorAssignOp<TensorConcatenationOp, const OtherDerived> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
protected:
typename LhsXprType::Nested m_lhs_xpr;
typename RhsXprType::Nested m_rhs_xpr;
const Axis m_axis;
};
// Eval as rvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
typedef typename XprType::Index Index;
static const int NumDims = internal::array_size<typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;
static const int RightNumDims = internal::array_size<typename TensorEvaluator<RightArgType, Device>::Dimensions>::value;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
eigen_assert(0 <= m_axis && m_axis < NumDims);
const Dimensions& lhs_dims = m_leftImpl.dimensions();
const Dimensions& rhs_dims = m_rightImpl.dimensions();
{
int i = 0;
for (; i < m_axis; ++i) {
eigen_assert(lhs_dims[i] > 0);
eigen_assert(lhs_dims[i] == rhs_dims[i]);
m_dimensions[i] = lhs_dims[i];
}
eigen_assert(lhs_dims[i] > 0); // Now i == m_axis.
eigen_assert(rhs_dims[i] > 0);
m_dimensions[i] = lhs_dims[i] + rhs_dims[i];
for (++i; i < NumDims; ++i) {
eigen_assert(lhs_dims[i] > 0);
eigen_assert(lhs_dims[i] == rhs_dims[i]);
m_dimensions[i] = lhs_dims[i];
}
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_leftStrides[0] = 1;
m_rightStrides[0] = 1;
m_outputStrides[0] = 1;
for (int j = 1; j < NumDims; ++j) {
m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1];
m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1];
m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1];
}
} else {
m_leftStrides[NumDims - 1] = 1;
m_rightStrides[NumDims - 1] = 1;
m_outputStrides[NumDims - 1] = 1;
for (int j = NumDims - 2; j >= 0; --j) {
m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1];
m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1];
m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1];
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
// TODO(phli): Add short-circuit memcpy evaluation if underlying data are linear?
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/)
{
m_leftImpl.evalSubExprsIfNeeded(NULL);
m_rightImpl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
{
m_leftImpl.cleanup();
m_rightImpl.cleanup();
}
// TODO(phli): attempt to speed this up. The integer divisions and modulo are slow.
// See CL/76180724 comments for more ideas.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
// Collect dimension-wise indices (subs).
array<Index, NumDims> subs;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
subs[i] = index / m_outputStrides[i];
index -= subs[i] * m_outputStrides[i];
}
subs[0] = index;
} else {
for (int i = 0; i < NumDims - 1; ++i) {
subs[i] = index / m_outputStrides[i];
index -= subs[i] * m_outputStrides[i];
}
subs[NumDims - 1] = index;
}
const Dimensions& left_dims = m_leftImpl.dimensions();
if (subs[m_axis] < left_dims[m_axis]) {
Index left_index;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
left_index = subs[0];
for (int i = 1; i < NumDims; ++i) {
left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
}
} else {
left_index = subs[NumDims - 1];
for (int i = NumDims - 2; i >= 0; --i) {
left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
}
}
return m_leftImpl.coeff(left_index);
} else {
subs[m_axis] -= left_dims[m_axis];
const Dimensions& right_dims = m_rightImpl.dimensions();
Index right_index;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
right_index = subs[0];
for (int i = 1; i < NumDims; ++i) {
right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
}
} else {
right_index = subs[NumDims - 1];
for (int i = NumDims - 2; i >= 0; --i) {
right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
}
}
return m_rightImpl.coeff(right_index);
}
}
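  // Worked example (editorial addition): concatenating ColMajor 2x3 and 2x5
  // inputs along axis 1 gives a 2x8 output with strides {1, 2}. For index 7,
  // subs = {1, 3}; since subs[1] >= 3 the coefficient comes from the right
  // tensor, at coordinates (1, 0).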
// TODO(phli): Add a real vectorization.
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + packetSize - 1 < dimensions().TotalSize());
EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
for (int i = 0; i < packetSize; ++i) {
values[i] = coeff(index+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
2 * TensorOpCost::MulCost<Index>() +
TensorOpCost::DivCost<Index>() +
TensorOpCost::ModCost<Index>());
const double lhs_size = m_leftImpl.dimensions().TotalSize();
const double rhs_size = m_rightImpl.dimensions().TotalSize();
return (lhs_size / (lhs_size + rhs_size)) *
m_leftImpl.costPerCoeff(vectorized) +
(rhs_size / (lhs_size + rhs_size)) *
m_rightImpl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, compute_cost);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
protected:
Dimensions m_dimensions;
array<Index, NumDims> m_outputStrides;
array<Index, NumDims> m_leftStrides;
array<Index, NumDims> m_rightStrides;
TensorEvaluator<LeftArgType, Device> m_leftImpl;
TensorEvaluator<RightArgType, Device> m_rightImpl;
const Axis m_axis;
};
// Eval as lvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
: public TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
typedef TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device> Base;
typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
typedef typename Base::Dimensions Dimensions;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device)
: Base(op, device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(Layout) == static_cast<int>(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE);
}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
{
// Collect dimension-wise indices (subs).
array<Index, Base::NumDims> subs;
for (int i = Base::NumDims - 1; i > 0; --i) {
subs[i] = index / this->m_outputStrides[i];
index -= subs[i] * this->m_outputStrides[i];
}
subs[0] = index;
const Dimensions& left_dims = this->m_leftImpl.dimensions();
if (subs[this->m_axis] < left_dims[this->m_axis]) {
Index left_index = subs[0];
for (int i = 1; i < Base::NumDims; ++i) {
left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i];
}
return this->m_leftImpl.coeffRef(left_index);
} else {
subs[this->m_axis] -= left_dims[this->m_axis];
const Dimensions& right_dims = this->m_rightImpl.dimensions();
Index right_index = subs[0];
for (int i = 1; i < Base::NumDims; ++i) {
right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i];
}
return this->m_rightImpl.coeffRef(right_index);
}
}
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());
EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
for (int i = 0; i < packetSize; ++i) {
coeffRef(index+i) = values[i];
}
}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
| 14,653 | 39.480663 | 205 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
namespace Eigen {
/** \class TensorContraction
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor contraction class.
*
*
*/
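// Illustrative usage sketch (editorial addition): a matrix product expressed
// as a rank-2 contraction over one index pair.
//   Eigen::Tensor<float, 2> a(2, 3), b(3, 4);
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {{Eigen::IndexPair<int>(1, 0)}};
//   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // c is 2 x 4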
namespace internal {
template<typename Dimensions, typename LhsXprType, typename RhsXprType>
struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename gebp_traits<typename remove_const<typename LhsXprType::Scalar>::type,
typename remove_const<typename RhsXprType::Scalar>::type>::ResScalar Scalar;
typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
typename traits<RhsXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<LhsXprType>::Index,
typename traits<RhsXprType>::Index>::type Index;
typedef typename LhsXprType::Nested LhsNested;
typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
// From NumDims below.
  static const int NumDimensions = traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
static const int Layout = traits<LhsXprType>::Layout;
enum {
Flags = 0
};
};
template<typename Dimensions, typename LhsXprType, typename RhsXprType>
struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, Eigen::Dense>
{
typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType>& type;
};
template<typename Dimensions, typename LhsXprType, typename RhsXprType>
struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType>, 1, typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >::type>
{
typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType> type;
};
template<typename Indices_, typename LeftArgType_, typename RightArgType_, typename Device_>
struct traits<TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_>, Device_> > {
typedef Indices_ Indices;
typedef LeftArgType_ LeftArgType;
typedef RightArgType_ RightArgType;
typedef Device_ Device;
// From NumDims below.
static const int NumDimensions = traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
};
} // end namespace internal
template<typename Indices, typename LhsXprType, typename RhsXprType>
class TensorContractionOp : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(
const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims)
: m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims) {}
EIGEN_DEVICE_FUNC
const Indices& indices() const { return m_indices; }
/** \returns the nested expressions */
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename LhsXprType::Nested>::type&
lhsExpression() const { return m_lhs_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
protected:
typename LhsXprType::Nested m_lhs_xpr;
typename RhsXprType::Nested m_rhs_xpr;
const Indices m_indices;
};
template<typename Derived>
struct TensorContractionEvaluatorBase
{
typedef typename internal::traits<Derived>::Indices Indices;
typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
typedef typename internal::traits<Derived>::RightArgType RightArgType;
typedef typename internal::traits<Derived>::Device Device;
typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
IsAligned = true,
PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = true
};
// Most of the code is assuming that both input tensors are ColMajor. If the
// inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
// will pretend B is LHS and A is RHS.
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
static const int LDims =
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
static const int RDims =
internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
static const int ContractDims = internal::array_size<Indices>::value;
static const int NumDims = LDims + RDims - 2 * ContractDims;
typedef array<Index, ContractDims> contract_t;
typedef array<Index, LDims - ContractDims> left_nocontract_t;
typedef array<Index, RDims - ContractDims> right_nocontract_t;
typedef DSizes<Index, NumDims> Dimensions;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorContractionEvaluatorBase(const XprType& op, const Device& device)
: m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
op.lhsExpression(), op.rhsExpression()), device),
m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
op.rhsExpression(), op.lhsExpression()), device),
m_device(device),
m_result(NULL) {
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
YOU_MADE_A_PROGRAMMING_MISTAKE);
DSizes<Index, LDims> eval_left_dims;
DSizes<Index, RDims> eval_right_dims;
array<IndexPair<Index>, ContractDims> eval_op_indices;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
// For ColMajor, we keep using the existing dimensions
for (int i = 0; i < LDims; i++) {
eval_left_dims[i] = m_leftImpl.dimensions()[i];
}
for (int i = 0; i < RDims; i++) {
eval_right_dims[i] = m_rightImpl.dimensions()[i];
}
// We keep the pairs of contracting indices.
for (int i = 0; i < ContractDims; i++) {
eval_op_indices[i].first = op.indices()[i].first;
eval_op_indices[i].second = op.indices()[i].second;
}
} else {
// For RowMajor, we need to reverse the existing dimensions
for (int i = 0; i < LDims; i++) {
eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
}
for (int i = 0; i < RDims; i++) {
eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
}
// We need to flip all the pairs of contracting indices as well as
// reversing the dimensions.
for (int i = 0; i < ContractDims; i++) {
eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
}
}
// Check for duplicate axes and make sure the first index in eval_op_indices
// is increasing. Using O(n^2) sorting is OK since ContractDims is small
for (int i = 0; i < ContractDims; i++) {
for (int j = i + 1; j < ContractDims; j++) {
eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
eval_op_indices[j].second != eval_op_indices[i].second &&
"contraction axes should be unique");
if (eval_op_indices[j].first < eval_op_indices[i].first) {
numext::swap(eval_op_indices[j], eval_op_indices[i]);
}
}
}
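// For instance, eval pairs {(2, 0), (0, 1)} are reordered to {(0, 1), (2, 0)}
// so that the first (LHS) indices are strictly increasing.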
array<Index, LDims> lhs_strides;
lhs_strides[0] = 1;
for (int i = 0; i < LDims-1; ++i) {
lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i];
}
array<Index, RDims> rhs_strides;
rhs_strides[0] = 1;
for (int i = 0; i < RDims-1; ++i) {
rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i];
}
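// Example (illustrative): for column-major eval_left_dims = (5, 3, 4) this
// yields lhs_strides = (1, 5, 15) -- each stride is the product of all
// earlier dimension sizes.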
if (m_i_strides.size() > 0) m_i_strides[0] = 1;
if (m_j_strides.size() > 0) m_j_strides[0] = 1;
if (m_k_strides.size() > 0) m_k_strides[0] = 1;
m_i_size = 1;
m_j_size = 1;
m_k_size = 1;
// To compute the output dimensions, we simply concatenate the non-contracting
// dimensions of the left and then the right tensor. Additionally, we compute
// the strides corresponding to the left and right non-contracting dimensions.
m_lhs_inner_dim_contiguous = true;
int dim_idx = 0;
unsigned int nocontract_idx = 0;
for (int i = 0; i < LDims; i++) {
// find if we are contracting on index i of left tensor
bool contracting = false;
for (int j = 0; j < ContractDims; j++) {
if (eval_op_indices[j].first == i) {
contracting = true;
break;
}
}
if (!contracting) {
// add dimension size to output dimensions
m_dimensions[dim_idx] = eval_left_dims[i];
m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
if (dim_idx != i) {
m_lhs_inner_dim_contiguous = false;
}
if (nocontract_idx+1 < internal::array_size<left_nocontract_t>::value) {
m_i_strides[nocontract_idx+1] =
m_i_strides[nocontract_idx] * eval_left_dims[i];
} else {
m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
}
dim_idx++;
nocontract_idx++;
}
}
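// Illustrative consequence of the check above: if the innermost LHS axis
// (i == 0) is contracted, the first non-contracting axis is visited with
// i == 1 while dim_idx == 0, so m_lhs_inner_dim_contiguous is cleared --
// non-contracting LHS accesses are then no longer stride-1.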
nocontract_idx = 0;
for (int i = 0; i < RDims; i++) {
bool contracting = false;
// find if we are contracting on index i of right tensor
for (int j = 0; j < ContractDims; j++) {
if (eval_op_indices[j].second == i) {
contracting = true;
break;
}
}
if (!contracting) {
m_dimensions[dim_idx] = eval_right_dims[i];
if (nocontract_idx+1 < internal::array_size<right_nocontract_t>::value) {
m_j_strides[nocontract_idx+1] =
m_j_strides[nocontract_idx] * eval_right_dims[i];
} else {
m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
}
m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
dim_idx++;
nocontract_idx++;
}
}
// Now compute the strides corresponding to the contracting dimensions. We
// assumed above that non-contracting axes are represented in the same order
// in the matrix as they are in the tensor. This is not the case for
// contracting axes. As the contracting axes must be of the same size in
// each tensor, we'll only look at the first tensor here.
m_rhs_inner_dim_contiguous = true;
m_rhs_inner_dim_reordered = false;
for (int i = 0; i < ContractDims; i++) {
Index left = eval_op_indices[i].first;
Index right = eval_op_indices[i].second;
Index size = eval_left_dims[left];
eigen_assert(size == eval_right_dims[right] &&
"Contraction axes must be same size");
if (i+1 < static_cast<int>(internal::array_size<contract_t>::value)) {
m_k_strides[i+1] = m_k_strides[i] * size;
} else {
m_k_size = m_k_strides[i] * size;
}
m_left_contracting_strides[i] = lhs_strides[left];
m_right_contracting_strides[i] = rhs_strides[right];
if (i > 0 && right < eval_op_indices[i-1].second) {
m_rhs_inner_dim_reordered = true;
}
if (right != i) {
m_rhs_inner_dim_contiguous = false;
}
}
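// Example (illustrative): eval pairs {(0, 1), (1, 0)} set
// m_rhs_inner_dim_reordered, since the second pair's RHS index (0) is
// smaller than the previous one (1); and because right != i for both pairs,
// m_rhs_inner_dim_contiguous is cleared as well.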
// If the layout is RowMajor, we need to reverse the m_dimensions
if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
numext::swap(m_dimensions[i], m_dimensions[j]);
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
m_leftImpl.evalSubExprsIfNeeded(NULL);
m_rightImpl.evalSubExprsIfNeeded(NULL);
if (data) {
evalTo(data);
return false;
} else {
m_result = static_cast<Scalar *>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
evalTo(m_result);
return true;
}
}
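// Note on the return value above (a reading aid, not new behavior): false
// tells the caller the result was already written into the buffer it
// supplied; true means the evaluator allocated and filled its own m_result,
// to be read back through coeff()/packet().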
EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
if (this->m_lhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
static_cast<const Derived*>(this)->template evalProduct<true, true, true, Unaligned>(buffer);
}
else {
static_cast<const Derived*>(this)->template evalProduct<true, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
static_cast<const Derived*>(this)->template evalProduct<true, false, true, Unaligned>(buffer);
}
else {
static_cast<const Derived*>(this)->template evalProduct<true, false, false, Unaligned>(buffer);
}
}
}
else {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
static_cast<const Derived*>(this)->template evalProduct<false, true, true, Unaligned>(buffer);
}
else {
static_cast<const Derived*>(this)->template evalProduct<false, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
static_cast<const Derived*>(this)->template evalProduct<false, false, true, Unaligned>(buffer);
}
else {
static_cast<const Derived*>(this)->template evalProduct<false, false, false, Unaligned>(buffer);
}
}
}
}
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
EIGEN_DEVICE_FUNC void evalGemv(Scalar* buffer) const {
const Index rows = m_i_size;
const Index cols = m_k_size;
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
LeftEvaluator, left_nocontract_t,
contract_t, lhs_packet_size,
lhs_inner_dim_contiguous,
false, lhs_alignment> LhsMapper;
typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
RightEvaluator, right_nocontract_t,
contract_t, rhs_packet_size,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, rhs_alignment> RhsMapper;
LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides,
m_left_contracting_strides, m_k_strides);
RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides,
m_right_contracting_strides, m_k_strides);
const Scalar alpha(1);
const Index resIncr(1);
// zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
m_device.memset(buffer, 0, rows * sizeof(Scalar));
internal::general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,false,RhsScalar,RhsMapper,false>::run(
rows, cols, lhs, rhs,
buffer, resIncr, alpha);
}
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
EIGEN_DEVICE_FUNC void evalGemm(Scalar* buffer) const {
// columns in left side, rows in right side
const Index k = this->m_k_size;
// rows in left side
const Index m = this->m_i_size;
// columns in right side
const Index n = this->m_j_size;
// zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
// define mr, nr, and all of my data mapper types
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
const Index nr = Traits::nr;
const Index mr = Traits::mr;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
LeftEvaluator, left_nocontract_t,
contract_t, lhs_packet_size,
lhs_inner_dim_contiguous,
false, Unaligned> LhsMapper;
typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
RightEvaluator, right_nocontract_t,
contract_t, rhs_packet_size,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Unaligned> RhsMapper;
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
// Declare GEBP packing and kernel structs
internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, mr, Traits::LhsProgress, ColMajor> pack_lhs;
internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, nr, ColMajor> pack_rhs;
internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper, mr, nr, false, false> gebp;
// initialize data mappers
LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
this->m_left_contracting_strides, this->m_k_strides);
RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
this->m_right_contracting_strides, this->m_k_strides);
OutputMapper output(buffer, m);
// Sizes of the blocks to load in cache. See the Goto paper for details.
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, 1);
const Index kc = blocking.kc();
const Index mc = numext::mini(m, blocking.mc());
const Index nc = numext::mini(n, blocking.nc());
const Index sizeA = mc * kc;
const Index sizeB = kc * nc;
LhsScalar* blockA = static_cast<LhsScalar *>(this->m_device.allocate(sizeA * sizeof(LhsScalar)));
RhsScalar* blockB = static_cast<RhsScalar *>(this->m_device.allocate(sizeB * sizeof(RhsScalar)));
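// Illustrative walk through the blocking below (the numbers are hypothetical;
// the real kc/mc/nc come from the cache-size heuristic above): with m = 100
// and mc = 32, the outer loop packs vertical LHS panels of 32, 32, 32 and 4
// rows; each panel is swept over k in chunks of kc and over n in chunks of
// nc, with gebp accumulating into the matching output tile.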
for(Index i2=0; i2<m; i2+=mc)
{
const Index actual_mc = numext::mini(i2+mc,m)-i2;
for (Index k2 = 0; k2 < k; k2 += kc) {
// make sure we don't overshoot right edge of left matrix, then pack vertical panel
const Index actual_kc = numext::mini(k2 + kc, k) - k2;
pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc, 0, 0);
// series of horizontal blocks
for (Index j2 = 0; j2 < n; j2 += nc) {
// make sure we don't overshoot right edge of right matrix, then pack block
const Index actual_nc = numext::mini(j2 + nc, n) - j2;
pack_rhs(blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc, 0, 0);
// call gebp (matrix kernel)
// The parameters here are copied from Eigen's GEMM implementation
gebp(output.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, Scalar(1), -1, -1, 0, 0);
}
}
}
this->m_device.deallocate(blockA);
this->m_device.deallocate(blockB);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_leftImpl.cleanup();
m_rightImpl.cleanup();
if (m_result != NULL) {
m_device.deallocate(m_result);
m_result = NULL;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
return m_result[index];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { return m_result; }
protected:
// Prevent assignment
TensorContractionEvaluatorBase& operator = (const TensorContractionEvaluatorBase&);
Dimensions m_dimensions;
contract_t m_k_strides;
contract_t m_left_contracting_strides;
contract_t m_right_contracting_strides;
bool m_lhs_inner_dim_contiguous;
bool m_rhs_inner_dim_contiguous;
bool m_rhs_inner_dim_reordered;
left_nocontract_t m_i_strides;
right_nocontract_t m_j_strides;
left_nocontract_t m_left_nocontract_strides;
right_nocontract_t m_right_nocontract_strides;
Index m_i_size;
Index m_j_size;
Index m_k_size;
TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
const Device& m_device;
Scalar* m_result;
};
// evaluator for default device
template<typename Indices, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> :
public TensorContractionEvaluatorBase<
TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> > {
typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
Layout = TensorEvaluator<LeftArgType, Device>::Layout
};
// Most of the code is assuming that both input tensors are ColMajor. If the
// inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
// will pretend B is LHS and A is RHS.
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
static const int LDims =
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
static const int RDims =
internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
static const int ContractDims = internal::array_size<Indices>::value;
typedef array<Index, ContractDims> contract_t;
typedef array<Index, LDims - ContractDims> left_nocontract_t;
typedef array<Index, RDims - ContractDims> right_nocontract_t;
static const int NumDims = LDims + RDims - 2 * ContractDims;
// Could we use NumDimensions here?
typedef DSizes<Index, NumDims> Dimensions;
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) { }
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
EIGEN_DEVICE_FUNC void evalProduct(Scalar* buffer) const {
if (this->m_j_size == 1) {
this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
return;
}
this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
}
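// In other words (illustrative): a contraction whose output has a single
// column -- e.g. matrix times vector -- makes m_j_size == 1 and takes the
// cheaper GEMV kernel; every other shape falls through to the blocked GEMM.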
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
| 26,680 | 41.418124 | 152 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
namespace Eigen {
namespace internal {
enum {
ShardByRow = 0,
ShardByCol = 1
};
// Default Blocking Strategy
template <typename LhsMapper, typename RhsMapper, typename Index, int ShardingType=ShardByCol>
class TensorContractionBlocking {
public:
typedef typename LhsMapper::Scalar LhsScalar;
typedef typename RhsMapper::Scalar RhsScalar;
EIGEN_DEVICE_FUNC TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) :
kc_(k), mc_(m), nc_(n)
{
if (ShardingType == ShardByCol) {
computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, mc_, nc_, num_threads);
}
else {
computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, nc_, mc_, num_threads);
}
}
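// Usage sketch (illustrative; LhsMapper/RhsMapper stand for the input-mapper
// types defined alongside the contraction evaluator, and the sizes are
// hypothetical):
//   TensorContractionBlocking<LhsMapper, RhsMapper, Eigen::Index, ShardByCol>
//       blocking(/*k=*/1024, /*m=*/512, /*n=*/512, /*num_threads=*/4);
//   // blocking.kc(), blocking.mc() and blocking.nc() then hold panel sizes
//   // chosen so the packed panels fit the cache hierarchy.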
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index kc() const { return kc_; }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index mc() const { return mc_; }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index nc() const { return nc_; }
private:
Index kc_;
Index mc_;
Index nc_;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
| 1,594 | 26.982456 | 97 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014-2015 Benoit Steiner <[email protected]>
// Copyright (C) 2015 Navdeep Jaitly <[email protected]>
// Copyright (C) 2014 Eric Martin <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
namespace Eigen {
template<typename Scalar, typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool needs_edge_check>
__device__ EIGEN_STRONG_INLINE void
EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem,
const Index m_size, const Index n_size, const Index k_size) {
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
// declare and initialize 64 registers for output 8x8 block
// prefetch registers
Scalar lhs_pf0;
Scalar lhs_pf1;
Scalar lhs_pf2;
Scalar lhs_pf3;
Scalar lhs_pf4;
Scalar lhs_pf5;
Scalar lhs_pf6;
Scalar lhs_pf7;
Scalar rhs_pf0;
Scalar rhs_pf1;
Scalar rhs_pf2;
Scalar rhs_pf3;
Scalar rhs_pf4;
Scalar rhs_pf5;
Scalar rhs_pf6;
Scalar rhs_pf7;
// shared memory is formatted
// (contract idx in block, nocontract idx in block, block idx)
// where block idx is column major. This transposition limits the number of
// bank conflicts when reading the LHS. The core idea is that since the contracting
// index is shared by both sides, it should live in threadIdx.x.
// On the LHS, we pad each row inside of each block with an extra element. This makes
// each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts
// on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks.
// On the RHS we just add 8 padding elements to the end of each block. This gives no bank
// conflicts on writes and also none on reads.
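// Worked example (illustrative): the thread with (x, y, z) = (3, 2, 5) gets
// lhs_store_idx_base = 2 * 72 + 3 * 9 + 5 = 176 and
// rhs_store_idx_base = 2 * 72 + 5 * 8 + 3 = 187, landing inside the padded
// 8-rows-of-9 LHS block layout described above.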
// storage indices
const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;
const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;
const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0;
const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1;
const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2;
const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3;
const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4;
const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5;
const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6;
const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7;
const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0;
const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1;
const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2;
const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3;
const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4;
const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5;
const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6;
const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7;
// in the loading code, the following variables are important:
// threadIdx.x: the vertical position in an 8x8 block
// threadIdx.y: the vertical index of the 8x8 block in the grid
// threadIdx.z: the horizontal position in an 8x8 block
// k: the horizontal index of the 8x8 block in the grid
//
// The k parameter is implicit (it was the loop counter for a loop that went
// from 0 to 7, but that loop has now been unrolled in the code below).
const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;
const Index lhs_vert = base_m + load_idx_vert;
#define prefetchIntoRegisters(base_k) \
{ \
lhs_pf0 = conv(0); \
lhs_pf1 = conv(0); \
lhs_pf2 = conv(0); \
lhs_pf3 = conv(0); \
lhs_pf4 = conv(0); \
lhs_pf5 = conv(0); \
lhs_pf6 = conv(0); \
lhs_pf7 = conv(0); \
\
rhs_pf0 = conv(0); \
rhs_pf1 = conv(0); \
rhs_pf2 = conv(0); \
rhs_pf3 = conv(0); \
rhs_pf4 = conv(0); \
rhs_pf5 = conv(0); \
rhs_pf6 = conv(0); \
rhs_pf7 = conv(0); \
\
if (!needs_edge_check || lhs_vert < m_size) { \
const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \
const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \
const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \
const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \
const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \
const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \
const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \
const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \
\
if (!needs_edge_check || lhs_horiz_7 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \
} else if (lhs_horiz_6 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \
} else if (lhs_horiz_5 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \
} else if (lhs_horiz_4 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \
} else if (lhs_horiz_3 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \
} else if (lhs_horiz_2 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \
} else if (lhs_horiz_1 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \
} else if (lhs_horiz_0 < k_size) { \
lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \
} \
} \
\
const Index rhs_vert = base_k + load_idx_vert; \
if (!needs_edge_check || rhs_vert < k_size) { \
const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \
const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \
const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \
const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \
const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \
const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \
const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \
const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \
\
if (rhs_horiz_7 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \
} else if (rhs_horiz_6 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \
} else if (rhs_horiz_5 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \
} else if (rhs_horiz_4 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \
} else if (rhs_horiz_3 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \
} else if (rhs_horiz_2 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \
} else if (rhs_horiz_1 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \
} else if (rhs_horiz_0 < n_size) { \
rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \
} \
} \
}
#define writeRegToShmem(_) \
lhs_shmem[lhs_store_idx_0] = lhs_pf0; \
rhs_shmem[rhs_store_idx_0] = rhs_pf0; \
\
lhs_shmem[lhs_store_idx_1] = lhs_pf1; \
rhs_shmem[rhs_store_idx_1] = rhs_pf1; \
\
lhs_shmem[lhs_store_idx_2] = lhs_pf2; \
rhs_shmem[rhs_store_idx_2] = rhs_pf2; \
\
lhs_shmem[lhs_store_idx_3] = lhs_pf3; \
rhs_shmem[rhs_store_idx_3] = rhs_pf3; \
\
lhs_shmem[lhs_store_idx_4] = lhs_pf4; \
rhs_shmem[rhs_store_idx_4] = rhs_pf4; \
\
lhs_shmem[lhs_store_idx_5] = lhs_pf5; \
rhs_shmem[rhs_store_idx_5] = rhs_pf5; \
\
lhs_shmem[lhs_store_idx_6] = lhs_pf6; \
rhs_shmem[rhs_store_idx_6] = rhs_pf6; \
\
lhs_shmem[lhs_store_idx_7] = lhs_pf7; \
rhs_shmem[rhs_store_idx_7] = rhs_pf7;
// declare and initialize result array
#define res(i, j) _res_##i##j
#define initResultRow(i) \
Scalar res(i, 0) = conv(0); \
Scalar res(i, 1) = conv(0); \
Scalar res(i, 2) = conv(0); \
Scalar res(i, 3) = conv(0); \
Scalar res(i, 4) = conv(0); \
Scalar res(i, 5) = conv(0); \
Scalar res(i, 6) = conv(0); \
Scalar res(i, 7) = conv(0);
internal::scalar_cast_op<int, Scalar> conv;
initResultRow(0);
initResultRow(1);
initResultRow(2);
initResultRow(3);
initResultRow(4);
initResultRow(5);
initResultRow(6);
initResultRow(7);
#undef initResultRow
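// e.g. res(2, 3) expands to the register variable _res_23; all 64
// accumulators therefore live in registers rather than in an indexed array.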
for (Index base_k = 0; base_k < k_size; base_k += 64) {
// wait for previous iteration to finish with shmem. Despite common sense,
// the code is a bit faster with this here than at the bottom of the loop.
__syncthreads();
prefetchIntoRegisters(base_k);
writeRegToShmem();
#undef prefetchIntoRegisters
#undef writeRegToShmem
// wait for shared mem packing to be done before starting computation
__syncthreads();
// compute 8x8 matrix product by outer product. This involves packing one column
// of LHS and one row of RHS into registers (takes 16 registers).
#define lcol(i) _lcol##i
Scalar lcol(0);
Scalar lcol(1);
Scalar lcol(2);
Scalar lcol(3);
Scalar lcol(4);
Scalar lcol(5);
Scalar lcol(6);
Scalar lcol(7);
#define rrow(j) _rrow##j
Scalar rrow(0);
Scalar rrow(1);
Scalar rrow(2);
Scalar rrow(3);
Scalar rrow(4);
Scalar rrow(5);
Scalar rrow(6);
Scalar rrow(7);
// Now x corresponds to k, y to m, and z to n
const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y];
const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z];
#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))]
#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))]
#define loadData(i, j) \
lcol(0) = lhs_element(0, j); \
rrow(0) = rhs_element(i, 0); \
lcol(1) = lhs_element(1, j); \
rrow(1) = rhs_element(i, 1); \
lcol(2) = lhs_element(2, j); \
rrow(2) = rhs_element(i, 2); \
lcol(3) = lhs_element(3, j); \
rrow(3) = rhs_element(i, 3); \
lcol(4) = lhs_element(4, j); \
rrow(4) = rhs_element(i, 4); \
lcol(5) = lhs_element(5, j); \
rrow(5) = rhs_element(i, 5); \
lcol(6) = lhs_element(6, j); \
rrow(6) = rhs_element(i, 6); \
lcol(7) = lhs_element(7, j); \
rrow(7) = rhs_element(i, 7);
#define computeCol(j) \
res(0, j) += lcol(0) * rrow(j); \
res(1, j) += lcol(1) * rrow(j); \
res(2, j) += lcol(2) * rrow(j); \
res(3, j) += lcol(3) * rrow(j); \
res(4, j) += lcol(4) * rrow(j); \
res(5, j) += lcol(5) * rrow(j); \
res(6, j) += lcol(6) * rrow(j); \
res(7, j) += lcol(7) * rrow(j);
#define computePass(i) \
loadData(i, i); \
\
computeCol(0); \
computeCol(1); \
computeCol(2); \
computeCol(3); \
computeCol(4); \
computeCol(5); \
computeCol(6); \
computeCol(7);
computePass(0);
computePass(1);
computePass(2);
computePass(3);
computePass(4);
computePass(5);
computePass(6);
computePass(7);
#undef lcol
#undef rrow
#undef lhs_element
#undef rhs_element
#undef loadData
#undef computeCol
#undef computePass
} // end loop over k
// we've now iterated over all of the large (i.e. width 64) k blocks and
// accumulated results in registers. At this point thread (x, y, z) contains
// the sum across all big k blocks of the product of the little k block of
// index (x, y) with the block of index (x, z). To compute the final output,
// we need to reduce the 8 threads over x by summation.
#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask)
#define reduceRow(i, mask) \
shuffleInc(i, 0, mask); \
shuffleInc(i, 1, mask); \
shuffleInc(i, 2, mask); \
shuffleInc(i, 3, mask); \
shuffleInc(i, 4, mask); \
shuffleInc(i, 5, mask); \
shuffleInc(i, 6, mask); \
shuffleInc(i, 7, mask);
#define reduceMatrix(mask) \
reduceRow(0, mask); \
reduceRow(1, mask); \
reduceRow(2, mask); \
reduceRow(3, mask); \
reduceRow(4, mask); \
reduceRow(5, mask); \
reduceRow(6, mask); \
reduceRow(7, mask);
// actually perform the reduction, now each thread of index (_, y, z)
// contains the correct values in its registers that belong in the output
// block
reduceMatrix(1);
reduceMatrix(2);
reduceMatrix(4);
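// Illustrative trace of the butterfly above: after mask 1, lanes whose ids
// differ only in bit 0 hold pairwise sums; after mask 2, groups of four
// lanes agree; after mask 4, all eight participating lanes hold the full
// sum -- three __shfl_xor rounds in place of a shared-memory reduction.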
#undef shuffleInc
#undef reduceRow
#undef reduceMatrix
// now we need to copy the 64 values into main memory. We can't split the work
// among threads because all the variables are in registers. There are two ways
// to do this:
// (1) have 1 thread do 64 writes from registers into global memory
// (2) have 1 thread do 64 writes into shared memory, and then 8 threads
// each do 8 writes into global memory. We can just overwrite the shared
// memory from the problem we just solved.
// (2) is slightly faster than (1) due to less branching and more ILP
// TODO: won't yield much gain, but could just use currently unused shared mem
// and then we won't have to sync
// wait for shared mem to be out of use
__syncthreads();
#define writeResultShmem(i, j) \
lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j);
#define writeRow(i) \
writeResultShmem(i, 0); \
writeResultShmem(i, 1); \
writeResultShmem(i, 2); \
writeResultShmem(i, 3); \
writeResultShmem(i, 4); \
writeResultShmem(i, 5); \
writeResultShmem(i, 6); \
writeResultShmem(i, 7);
if (threadIdx.x == 0) {
writeRow(0);
writeRow(1);
writeRow(2);
writeRow(3);
writeRow(4);
writeRow(5);
writeRow(6);
writeRow(7);
}
#undef writeResultShmem
#undef writeRow
const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8);
const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8);
if (threadIdx.x < max_i_write) {
if (max_j_write == 8) {
// TODO: can I trade bank conflicts for coalesced writes?
Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0];
Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1];
Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2];
Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3];
Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4];
Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5];
Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6];
Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7];
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6;
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7;
} else {
#pragma unroll 7
for (int j = 0; j < max_j_write; j++) {
Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j];
output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val;
}
}
}
#undef res
}
template<typename Scalar, typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(512)
EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ Scalar lhs_shmem[72 * 64];
__shared__ Scalar rhs_shmem[72 * 64];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
if (base_m + 63 < m_size && base_n + 63 < n_size) {
EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
} else {
EigenContractionKernelInternal<Scalar, Index, LhsMapper, RhsMapper, OutputMapper, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size);
}
}
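// Launch sketch (illustrative; the grid/block shapes are assumptions matching
// the 64x64 output tiles and __launch_bounds__(512) above):
//   dim3 block_size(8, 8, 8);  // 512 threads
//   dim3 num_blocks((m_size + 63) / 64, (n_size + 63) / 64, 1);
//   EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>
//       <<<num_blocks, block_size>>>(lhs, rhs, output, m_size, n_size, k_size);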
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
bool CHECK_RHS_BOUNDARY>
__device__ EIGEN_STRONG_INLINE void
EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, float2 lhs_shmem2[][16],
float2 rhs_shmem2[][8], const Index m_size,
const Index n_size, const Index k_size,
const Index base_m, const Index base_n) {
typedef float Scalar;
// prefetch registers
float4 lhs_pf0, rhs_pf0;
float4 results[4];
for (int i=0; i < 4; i++) {
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
reg =lhs.loadPacket<Unaligned>(row, col); \
} \
} else { \
if (col < k_size) { \
if (row + 3 < m_size) { \
reg =lhs.loadPacket<Unaligned>(row, col); \
} else if (row + 2 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
reg.z =lhs(row + 2, col); \
} else if (row + 1 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
} else if (row < m_size) { \
reg.x =lhs(row + 0, col); \
} \
} \
}
Index lhs_vert = base_m+threadIdx.x*4;
for (Index k = 0; k < k_size; k += 16) {
lhs_pf0 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
Index lhs_horiz = threadIdx.y+k;
prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
Index rhs_vert = k+(threadIdx.x%4)*4;
Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n;
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if (rhs_vert + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if ((rhs_vert + 1) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
}
}
float x1, x2 ;
// the following could be done with bitwise operations some day.
if((threadIdx.x%8) < 4) {
x1 = rhs_pf0.y;
x2 = rhs_pf0.w;
} else {
x1 = rhs_pf0.x;
x2 = rhs_pf0.z;
}
x1 = __shfl_xor(x1, 4);
x2 = __shfl_xor(x2, 4);
if((threadIdx.x%8) < 4) {
rhs_pf0.y = x1;
rhs_pf0.w = x2;
} else {
rhs_pf0.x = x1;
rhs_pf0.z = x2;
}
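// Illustrative effect of the exchange above: each lane pairs with lane ^ 4;
// the lower lane's (y, w) components are swapped with the upper lane's
// (x, z), interleaving the k-values so the rhs_shmem2 stores below receive
// the layout the compute loop expects.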
// We have 64 features.
// Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1.
// Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3.
// ...
// Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63
// Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1
// ...
rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y);
rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w);
// Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// ...
// Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61)
// Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63)
// ...
lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y);
lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w);
#define add_vals(fl1, fl2, fr1, fr2)\
results[0].x += fl1.x * fr1.x;\
results[0].y += fl1.y * fr1.x;\
results[0].z += fl2.x * fr1.x;\
results[0].w += fl2.y * fr1.x;\
\
results[1].x += fl1.x * fr1.y;\
results[1].y += fl1.y * fr1.y;\
results[1].z += fl2.x * fr1.y;\
results[1].w += fl2.y * fr1.y;\
\
results[2].x += fl1.x * fr2.x;\
results[2].y += fl1.y * fr2.x;\
results[2].z += fl2.x * fr2.x;\
results[2].w += fl2.y * fr2.x;\
\
results[3].x += fl1.x * fr2.y;\
results[3].y += fl1.y * fr2.y;\
results[3].z += fl2.x * fr2.y;\
results[3].w += fl2.y * fr2.y;
__syncthreads();
// Do the multiplies.
#pragma unroll
for (int koff = 0; koff < 16; koff ++) {
// 32 x threads.
float2 fl1 = lhs_shmem2[koff][threadIdx.x];
float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x];
int start_feature = threadIdx.y * 4;
float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4];
add_vals(fl1, fl2, fr1, fr2)
}
__syncthreads();
}
#undef prefetch_lhs
#undef add_vals
Index horiz_base = threadIdx.y*4+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (!CHECK_RHS_BOUNDARY) {
// CHECK LHS
if (lhs_vert + 3 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (lhs_vert + 2 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
}
} else if (lhs_vert + 1 < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
}
} else if (lhs_vert < m_size) {
for (int i = 0; i < 4; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
}
}
} else if (!CHECK_LHS_BOUNDARY) {
// CHECK RHS
/*
int ncols_rem = fminf(n_size- horiz_base, 4);
for (int i = 0; i < ncols_rem; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}*/
for (int i = 0; i < 4; i++) {
if (horiz_base+i < n_size) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
} else {
// CHECK both boundaries.
for (int i = 0; i < 4; i++) {
if (horiz_base+i < n_size) {
if (lhs_vert < m_size)
output(lhs_vert, horiz_base + i) = results[i].x;
if (lhs_vert + 1 < m_size)
output(lhs_vert + 1, horiz_base + i) = results[i].y;
if (lhs_vert + 2 < m_size)
output(lhs_vert + 2, horiz_base + i) = results[i].z;
if (lhs_vert + 3 < m_size)
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper, bool CHECK_LHS_BOUNDARY,
bool CHECK_RHS_BOUNDARY>
__device__ EIGEN_STRONG_INLINE void
EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output, float2 lhs_shmem2[][32],
float2 rhs_shmem2[][8], const Index m_size,
const Index n_size, const Index k_size,
const Index base_m, const Index base_n) {
typedef float Scalar;
// prefetch registers
float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3;
float4 rhs_pf0, rhs_pf1;
float4 results[8];
for (int i=0; i < 8; i++) {
results[i].x = results[i].y = results[i].z = results[i].w = 0;
}
Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;
for (Index k = 0; k < k_size; k += 32) {
lhs_pf0 = internal::pset1<float4>(0);
lhs_pf1 = internal::pset1<float4>(0);
lhs_pf2 = internal::pset1<float4>(0);
lhs_pf3 = internal::pset1<float4>(0);
rhs_pf0 = internal::pset1<float4>(0);
rhs_pf1 = internal::pset1<float4>(0);
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k));
}
} else if (lhs_vert + 1 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k));
}
} else if (lhs_vert < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k));
}
}
}
__syncthreads();
Index rhs_vert = k+threadIdx.x*4;
Index rhs_horiz0 = threadIdx.y*2+base_n;
Index rhs_horiz1 = threadIdx.y*2+1+base_n;
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
} else if (rhs_vert + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
}
} else {
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1);
} else if (k+threadIdx.x*4 + 1 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1);
} else if (k+threadIdx.x*4 < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf1.x = rhs(rhs_vert, rhs_horiz1);
}
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0);
} else if ((rhs_vert + 1) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
} else if (rhs_vert < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
}
}
}
__syncthreads();
// Loaded. Do computation
// Row 0 -> times (0, 4, 8, .. 28) for features 0, 1.
// Row 1 -> times (0, 4, 8, .. 28) for features 2, 3.
// ..
// Row 31 -> times (0, 4, 8, .. 28) for features 62, 63
rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x);
// Row 32 -> times (1, 5, 9, .. 29) for features 0, 1.
// Row 33 -> times (1, 5, 9, .. 29) for features 2, 3.
// ..
rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y);
// Row 64 -> times (2, 6, 10, .. 30) for features 0, 1.
// Row 65 -> times (2, 6, 10, .. 30) for features 2, 3.
rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z);
// Row 96 -> times (3, 7, 11, .. 31) for features 0, 1.
// Row 97 -> times (3, 7, 11, .. 31) for features 2, 3.
rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w);
// LHS.
// Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
// Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125)
// ...
// Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
// Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127)
#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\
results[0].x += a_feat1.x * f1.x;\
results[1].x += a_feat1.x * f1.y;\
results[2].x += a_feat1.x * f2.x;\
results[3].x += a_feat1.x * f2.y;\
results[4].x += a_feat1.x * f3.x;\
results[5].x += a_feat1.x * f3.y;\
results[6].x += a_feat1.x * f4.x;\
results[7].x += a_feat1.x * f4.y;\
\
results[0].y += a_feat1.y * f1.x;\
results[1].y += a_feat1.y * f1.y;\
results[2].y += a_feat1.y * f2.x;\
results[3].y += a_feat1.y * f2.y;\
results[4].y += a_feat1.y * f3.x;\
results[5].y += a_feat1.y * f3.y;\
results[6].y += a_feat1.y * f4.x;\
results[7].y += a_feat1.y * f4.y;\
\
results[0].z += a_feat2.x * f1.x;\
results[1].z += a_feat2.x * f1.y;\
results[2].z += a_feat2.x * f2.x;\
results[3].z += a_feat2.x * f2.y;\
results[4].z += a_feat2.x * f3.x;\
results[5].z += a_feat2.x * f3.y;\
results[6].z += a_feat2.x * f4.x;\
results[7].z += a_feat2.x * f4.y;\
\
results[0].w += a_feat2.y * f1.x;\
results[1].w += a_feat2.y * f1.y;\
results[2].w += a_feat2.y * f2.x;\
results[3].w += a_feat2.y * f2.y;\
results[4].w += a_feat2.y * f3.x;\
results[5].w += a_feat2.y * f3.y;\
results[6].w += a_feat2.y * f4.x;\
results[7].w += a_feat2.y * f4.y;
lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y);
lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y);
lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y);
lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y);
lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w);
lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w);
lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w);
lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w);
__syncthreads();
// Do the multiplies.
#pragma unroll
for (int koff = 0; koff < 32; koff ++) {
float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8];
float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8];
// the first feature is at (threadIdx.y/4) * 8; the last is at start_feature + 7.
int start_feature = (threadIdx.y / 4) * 8;
float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4];
float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4];
float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4];
float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4];
add_vals(a3, a4, br1, br2, br3, br4)
}
__syncthreads();
} // end loop over k
__syncthreads();
Index horiz_base = (threadIdx.y/4)*8+base_n;
if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (!CHECK_RHS_BOUNDARY) {
if (lhs_vert + 3 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
} else if (lhs_vert + 2 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
}
} else if (lhs_vert + 1 < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
}
} else if (lhs_vert < m_size) {
for (int i = 0; i < 8; i++) {
output(lhs_vert, horiz_base + i) = results[i].x;
}
}
} else if (!CHECK_LHS_BOUNDARY) {
// CHECK BOUNDARY_B
for (int i = 0; i < 8; i++) {
if (horiz_base + i < n_size) {
output(lhs_vert, horiz_base + i) = results[i].x;
output(lhs_vert + 1, horiz_base + i) = results[i].y;
output(lhs_vert + 2, horiz_base + i) = results[i].z;
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
} else {
// CHECK both boundaries.
for (int i = 0; i < 8; i++) {
if (horiz_base + i < n_size) {
if (lhs_vert < m_size)
output(lhs_vert, horiz_base + i) = results[i].x;
if (lhs_vert + 1 < m_size)
output(lhs_vert + 1, horiz_base + i) = results[i].y;
if (lhs_vert + 2 < m_size)
output(lhs_vert + 2, horiz_base + i) = results[i].z;
if (lhs_vert + 3 < m_size)
output(lhs_vert + 3, horiz_base + i) = results[i].w;
}
}
}
}
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(256)
EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ float2 lhs_shmem[64*32];
__shared__ float2 rhs_shmem[128*8];
typedef float2 LHS_MEM[64][32];
typedef float2 RHS_MEM[128][8];
typedef float2 LHS_MEM16x16[32][16];
typedef float2 RHS_MEM16x16[64][8];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 128 * m_block_idx;
const Index base_n = 64 * n_block_idx;
bool check_rhs = (base_n + 63) >= n_size;
bool check_lhs128 = (base_m + 127) >= m_size;
if (!check_rhs) {
if (!check_lhs128) {
// >= 128 rows left
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
}
} else {
if (!check_lhs128) {
// >= 128 rows left
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(
lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n);
}
}
}
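// Illustration (not part of Eigen): the dispatch above assumes every block owns
// a 128x64 output tile and only instantiates the boundary-checked kernel
// variants for edge tiles. A minimal host-side sketch of that tiling math,
// using hypothetical names:
#if 0
struct TilePlan { int m_blocks, n_blocks; bool check_lhs, check_rhs; };
inline TilePlan plan_float_tiles(int m_size, int n_size,
                                 int m_block_idx, int n_block_idx) {
  TilePlan p;
  p.m_blocks = (m_size + 127) / 128;       // ceil(m / 128): tile rows
  p.n_blocks = (n_size + 63) / 64;         // ceil(n / 64): tile columns
  const int base_m = 128 * m_block_idx;    // first output row of this tile
  const int base_n = 64 * n_block_idx;     // first output column of this tile
  p.check_lhs = (base_m + 127) >= m_size;  // partial tile on the m edge
  p.check_rhs = (base_n + 63) >= n_size;   // partial tile on the n edge
  return p;
}
#endif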
template<typename Index, typename LhsMapper,
typename RhsMapper, typename OutputMapper>
__global__ void
__launch_bounds__(256)
EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs,
const OutputMapper output,
const Index m_size, const Index n_size, const Index k_size) {
__shared__ float2 lhs_shmem[32][16];
__shared__ float2 rhs_shmem[64][8];
const Index m_block_idx = blockIdx.x;
const Index n_block_idx = blockIdx.y;
const Index base_m = 64 * m_block_idx;
const Index base_n = 64 * n_block_idx;
if (base_m + 63 < m_size) {
if (base_n + 63 < n_size) {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, false, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
}
} else {
if (base_n + 63 < n_size) {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, false>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
} else {
EigenFloatContractionKernelInternal16x16<Index, LhsMapper, RhsMapper, OutputMapper, true, true>(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n);
}
}
}
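// Worked numbers (illustrative): this variant launches 16x16 = 256 threads per
// block over a 64x64 output tile, so each thread accumulates 64*64/256 = 16
// coefficients; the 8x32 variant above covers a 128x64 tile with the same 256
// threads, i.e. 32 coefficients per thread.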
template<typename Indices, typename LeftArgType, typename RightArgType>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> :
public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, GpuDevice> > {
typedef GpuDevice Device;
typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
enum {
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
};
// Most of the code is assuming that both input tensors are ColMajor. If the
// inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
// will pretend B is LHS and A is RHS.
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
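  // Worked identity behind the swap (illustrative): a RowMajor matrix viewed
  // through ColMajor indexing is its transpose, so computing C = A * B on
  // RowMajor operands equals computing C^T = B^T * A^T on their ColMajor
  // views - hence B is evaluated as the "left" argument and A as the "right".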
static const int LDims =
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
static const int RDims =
internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
static const int ContractDims = internal::array_size<Indices>::value;
typedef array<Index, LDims> left_dim_mapper_t;
typedef array<Index, RDims> right_dim_mapper_t;
typedef array<Index, ContractDims> contract_t;
typedef array<Index, LDims - ContractDims> left_nocontract_t;
typedef array<Index, RDims - ContractDims> right_nocontract_t;
static const int NumDims = LDims + RDims - 2 * ContractDims;
typedef DSizes<Index, NumDims> Dimensions;
// typedefs needed in evalTo
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
typedef typename LeftEvaluator::Dimensions LeftDimensions;
typedef typename RightEvaluator::Dimensions RightDimensions;
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) {}
// We need to redefine this method to make nvcc happy
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
this->m_leftImpl.evalSubExprsIfNeeded(NULL);
this->m_rightImpl.evalSubExprsIfNeeded(NULL);
if (data) {
evalTo(data);
return false;
} else {
this->m_result = static_cast<Scalar *>(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar)));
evalTo(this->m_result);
return true;
}
}
void evalTo(Scalar* buffer) const {
if (this->m_lhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<true, true, true, Unaligned>(buffer);
}
else {
evalTyped<true, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<true, false, true, Unaligned>(buffer);
}
else {
evalTyped<true, false, false, Unaligned>(buffer);
}
}
}
else {
if (this->m_rhs_inner_dim_contiguous) {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<false, true, true, Unaligned>(buffer);
}
else {
evalTyped<false, true, false, Unaligned>(buffer);
}
}
else {
if (this->m_rhs_inner_dim_reordered) {
evalTyped<false, false, true, Unaligned>(buffer);
}
else {
evalTyped<false, false, false, Unaligned>(buffer);
}
}
}
}
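  // Illustration (not Eigen API): the nested branches above are the standard
  // pattern for lifting runtime flags into compile-time template parameters.
  // A condensed sketch of the same idea for two flags, with hypothetical names:
#if 0
  template <bool A, bool B> void kernel();
  void dispatch(bool a, bool b) {
    if (a) { b ? kernel<true, true>()  : kernel<true, false>();  }
    else   { b ? kernel<false, true>() : kernel<false, false>(); }
  }
#endif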
template <typename LhsScalar, typename RhsScalar, typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels {
static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
const Index m_blocks = (m + 63) / 64;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(8, 8, 8);
LAUNCH_CUDA_KERNEL((EigenContractionKernel<Scalar, Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
}
};
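  // Worked numbers (illustrative): for m = 1000, n = 500 this generic path
  // yields m_blocks = (1000 + 63) / 64 = 16 and n_blocks = (500 + 63) / 64 = 8,
  // i.e. a 16x8 grid of 8x8x8 = 512-thread blocks, each owning a 64x64 tile.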
template <typename Index, typename LhsMapper, typename RhsMapper, typename OutputMapper> struct LaunchKernels<float, float, Index, LhsMapper, RhsMapper, OutputMapper> {
static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) {
if (m < 768 || n < 768) {
const Index m_blocks = (m + 63) / 64;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(16, 16, 1);
LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel16x16<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
} else {
const Index m_blocks = (m + 127) / 128;
const Index n_blocks = (n + 63) / 64;
const dim3 num_blocks(m_blocks, n_blocks, 1);
const dim3 block_size(8, 32, 1);
LAUNCH_CUDA_KERNEL((EigenFloatContractionKernel<Index, LhsMapper, RhsMapper, OutputMapper>), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k);
}
}
};
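  // Worked numbers (illustrative): with m = n = 512 (< 768) the 16x16 kernel
  // runs on an 8x8 grid of 64x64 tiles; with m = n = 2048 the 8x32 kernel runs
  // instead, on (2048 + 127) / 128 = 16 by (2048 + 63) / 64 = 32 blocks of
  // larger 128x64 tiles.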
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
void evalTyped(Scalar* buffer) const {
// columns in left side, rows in right side
const Index k = this->m_k_size;
EIGEN_UNUSED_VARIABLE(k)
// rows in left side
const Index m = this->m_i_size;
// columns in right side
const Index n = this->m_j_size;
    // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
LeftEvaluator, left_nocontract_t,
contract_t, 4,
lhs_inner_dim_contiguous,
false, Unaligned> LhsMapper;
typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
RightEvaluator, right_nocontract_t,
contract_t, 4,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Unaligned> RhsMapper;
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
// initialize data mappers
LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
this->m_left_contracting_strides, this->m_k_strides);
RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
this->m_right_contracting_strides, this->m_k_strides);
OutputMapper output(buffer, m);
setCudaSharedMemConfig(cudaSharedMemBankSizeEightByte);
LaunchKernels<LhsScalar, RhsScalar, Index, LhsMapper, RhsMapper, OutputMapper>::Run(lhs, rhs, output, m, n, k, this->m_device);
}
};
} // end namespace Eigen
#endif // EIGEN_USE_GPU and __CUDACC__
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_CUDA_H
// ============================================================================
// abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
// ============================================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
namespace Eigen {
namespace internal {
enum {
Rhs = 0,
Lhs = 1
};
/*
* Implementation of the Eigen blas_data_mapper class for tensors.
*/
template <typename Tensor, bool HasRawAccess> struct CoeffLoader {
enum {
DirectOffsets = false
};
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) {
eigen_assert(false && "unsupported");
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); }
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
{
return m_tensor.template packet<LoadMode>(index);
}
private:
const Tensor m_tensor;
};
template <typename Tensor> struct CoeffLoader<Tensor, true> {
enum {
DirectOffsets = true
};
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
m_data += offset;
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); }
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
typename Tensor::PacketReturnType packet(typename Tensor::Index index) const
{
return internal::ploadt_ro<typename Tensor::PacketReturnType, LoadMode>(m_data + index);
}
private:
typedef typename Tensor::Scalar Scalar;
const Scalar* m_data;
};
template<typename Scalar, typename Index, int side,
typename Tensor,
typename nocontract_t, typename contract_t,
int packet_size, bool inner_dim_contiguous, int Alignment>
class SimpleTensorContractionMapper {
public:
EIGEN_DEVICE_FUNC
SimpleTensorContractionMapper(const Tensor& tensor,
const nocontract_t& nocontract_strides,
const nocontract_t& ij_strides,
const contract_t& contract_strides,
const contract_t& k_strides) :
m_tensor(tensor),
m_nocontract_strides(nocontract_strides),
m_ij_strides(ij_strides),
m_contract_strides(contract_strides),
m_k_strides(k_strides) { }
enum {
DirectOffsets = CoeffLoader<Tensor, Tensor::RawAccess>::DirectOffsets
};
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
m_tensor.offsetBuffer(offset);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar operator()(Index row) const {
// column major assumption
return operator()(row, 0);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const {
return m_tensor.coeff(computeIndex(row, col));
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const {
const bool left = (side == Lhs);
Index nocontract_val = left ? row : col;
Index linidx = 0;
for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
const Index idx = nocontract_val / m_ij_strides[i];
linidx += idx * m_nocontract_strides[i];
nocontract_val -= idx * m_ij_strides[i];
}
if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
if (side == Lhs && inner_dim_contiguous) {
eigen_assert(m_nocontract_strides[0] == 1);
linidx += nocontract_val;
} else {
linidx += nocontract_val * m_nocontract_strides[0];
}
}
Index contract_val = left ? col : row;
if(array_size<contract_t>::value > 0) {
for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
const Index idx = contract_val / m_k_strides[i];
linidx += idx * m_contract_strides[i];
contract_val -= idx * m_k_strides[i];
}
if (side == Rhs && inner_dim_contiguous) {
eigen_assert(m_contract_strides[0] == 1);
linidx += contract_val;
} else {
linidx += contract_val * m_contract_strides[0];
}
}
return linidx;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE IndexPair<Index> computeIndexPair(Index row, Index col, const Index distance) const {
const bool left = (side == Lhs);
Index nocontract_val[2] = {left ? row : col, left ? row + distance : col};
Index linidx[2] = {0, 0};
if (array_size<typename Tensor::Dimensions>::value > array_size<contract_t>::value) {
for (int i = static_cast<int>(array_size<nocontract_t>::value) - 1; i > 0; i--) {
const Index idx0 = nocontract_val[0] / m_ij_strides[i];
const Index idx1 = nocontract_val[1] / m_ij_strides[i];
linidx[0] += idx0 * m_nocontract_strides[i];
linidx[1] += idx1 * m_nocontract_strides[i];
nocontract_val[0] -= idx0 * m_ij_strides[i];
nocontract_val[1] -= idx1 * m_ij_strides[i];
}
if (side == Lhs && inner_dim_contiguous) {
eigen_assert(m_nocontract_strides[0] == 1);
linidx[0] += nocontract_val[0];
linidx[1] += nocontract_val[1];
} else {
linidx[0] += nocontract_val[0] * m_nocontract_strides[0];
linidx[1] += nocontract_val[1] * m_nocontract_strides[0];
}
}
Index contract_val[2] = {left ? col : row, left ? col : row + distance};
if (array_size<contract_t>::value> 0) {
for (int i = static_cast<int>(array_size<contract_t>::value) - 1; i > 0; i--) {
const Index idx0 = contract_val[0] / m_k_strides[i];
const Index idx1 = contract_val[1] / m_k_strides[i];
linidx[0] += idx0 * m_contract_strides[i];
linidx[1] += idx1 * m_contract_strides[i];
contract_val[0] -= idx0 * m_k_strides[i];
contract_val[1] -= idx1 * m_k_strides[i];
}
if (side == Rhs && inner_dim_contiguous) {
eigen_assert(m_contract_strides[0] == 1);
linidx[0] += contract_val[0];
linidx[1] += contract_val[1];
} else {
linidx[0] += contract_val[0] * m_contract_strides[0];
linidx[1] += contract_val[1] * m_contract_strides[0];
}
}
return IndexPair<Index>(linidx[0], linidx[1]);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const {
    // Only claim alignment when we can compute the actual stride (i.e. when we're
    // dealing with the lhs and inner_dim_contiguous holds). This is because the
    // matrix-vector product relies on the stride when dealing with aligned inputs.
return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 0 : size;
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const {
return ((side == Lhs) && inner_dim_contiguous && array_size<contract_t>::value > 0) ? m_contract_strides[0] : 1;
}
protected:
CoeffLoader<Tensor, Tensor::RawAccess> m_tensor;
const nocontract_t m_nocontract_strides;
const nocontract_t m_ij_strides;
const contract_t m_contract_strides;
const contract_t m_k_strides;
};
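// Illustration (not part of Eigen): computeIndex peels mixed-radix "digits" off
// the row/column coordinate using the ij/k strides, then re-weights each digit
// by the tensor's own strides. A self-contained sketch of that digit peeling,
// with hypothetical names:
#if 0
#include <array>
// Decompose `val` along `coord_strides` and recompose it along `tensor_strides`
// (both innermost-first, as in the mapper above).
inline long remap_index(long val, const std::array<long, 3>& coord_strides,
                        const std::array<long, 3>& tensor_strides) {
  long lin = 0;
  for (int i = 2; i > 0; i--) {
    const long digit = val / coord_strides[i];  // extract the i-th digit
    lin += digit * tensor_strides[i];           // re-weight it
    val -= digit * coord_strides[i];            // keep the remainder
  }
  return lin + val * tensor_strides[0];         // innermost contribution
}
#endif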
template<typename Scalar, typename Index, int side,
typename Tensor,
typename nocontract_t, typename contract_t,
int packet_size, bool inner_dim_contiguous,
bool inner_dim_reordered, int Alignment>
class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment>
{
public:
typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, Alignment> ParentMapper;
EIGEN_DEVICE_FUNC
BaseTensorContractionMapper(const Tensor& tensor,
const nocontract_t& nocontract_strides,
const nocontract_t& ij_strides,
const contract_t& contract_strides,
const contract_t& k_strides) :
ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
typedef typename Tensor::PacketReturnType Packet;
typedef typename unpacket_traits<Packet>::half HalfPacket;
template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
// whole method makes column major assumption
// don't need to add offsets for now (because operator handles that)
// current code assumes packet size must be a multiple of 2
EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) {
const Index index = this->computeIndex(i, j);
eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1);
return this->m_tensor.template packet<AlignmentType>(index);
}
const IndexPair<Index> indexPair = this->computeIndexPair(i, j, packet_size - 1);
const Index first = indexPair.first;
const Index last = indexPair.second;
// We can always do optimized packet reads from left hand side right now, because
// the vertical matrix dimension on the left hand side is never contracting.
// On the right hand side we need to check if the contracting dimensions may have
// been shuffled first.
if (Tensor::PacketAccess &&
(side == Lhs || internal::array_size<contract_t>::value <= 1 || !inner_dim_reordered) &&
(last - first) == (packet_size - 1)) {
return this->m_tensor.template packet<AlignmentType>(first);
}
EIGEN_ALIGN_MAX Scalar data[packet_size];
data[0] = this->m_tensor.coeff(first);
for (Index k = 1; k < packet_size - 1; k += 2) {
const IndexPair<Index> internal_pair = this->computeIndexPair(i + k, j, 1);
data[k] = this->m_tensor.coeff(internal_pair.first);
data[k + 1] = this->m_tensor.coeff(internal_pair.second);
}
data[packet_size - 1] = this->m_tensor.coeff(last);
return pload<Packet>(data);
}
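  // Worked condition (illustrative): the computeIndexPair fast path above fires
  // only when the packet's first and last coefficients are provably adjacent,
  // i.e. last - first == packet_size - 1; otherwise the coefficients are
  // gathered pairwise through computeIndexPair(i + k, j, 1).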
template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
// whole method makes column major assumption
// don't need to add offsets for now (because operator handles that)
const Index half_packet_size = unpacket_traits<HalfPacket>::size;
if (half_packet_size == packet_size) {
return loadPacket<AlignmentType>(i, j);
}
EIGEN_ALIGN_MAX Scalar data[half_packet_size];
for (Index k = 0; k < half_packet_size; k++) {
data[k] = operator()(i + k, j);
}
return pload<HalfPacket>(data);
}
};
template<typename Scalar, typename Index, int side,
typename Tensor,
typename nocontract_t, typename contract_t,
bool inner_dim_contiguous,
bool inner_dim_reordered, int Alignment>
class BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, inner_dim_reordered, Alignment> : public SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment>
{
public:
typedef SimpleTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, 1, inner_dim_contiguous, Alignment> ParentMapper;
EIGEN_DEVICE_FUNC
BaseTensorContractionMapper(const Tensor& tensor,
const nocontract_t& nocontract_strides,
const nocontract_t& ij_strides,
const contract_t& contract_strides,
const contract_t& k_strides) :
ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
typedef typename Tensor::PacketReturnType Packet;
template <int> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
EIGEN_ALIGN_MAX Scalar data[1];
data[0] = this->m_tensor.coeff(this->computeIndex(i, j));
return pload<typename Tensor::PacketReturnType>(data);
}
template <int> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Packet loadHalfPacket(Index i, Index j) const {
return loadPacket(i, j);
}
};
template<typename Scalar, typename Index, int side,
typename Tensor,
typename nocontract_t, typename contract_t,
int packet_size,
bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
class TensorContractionSubMapper {
public:
typedef typename Tensor::PacketReturnType Packet;
typedef typename unpacket_traits<Packet>::half HalfPacket;
typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> ParentMapper;
typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> Self;
typedef Self LinearMapper;
enum {
    // We can use direct offsets iff the parent mapper supports them and we can compute the strides.
// TODO: we should also enable direct offsets for the Rhs case.
UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size<contract_t>::value > 0)
};
EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset)
: m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) {
// Bake the offsets into the buffer used by the base mapper whenever possible. This avoids the need to recompute
// this offset every time we attempt to access a coefficient.
if (UseDirectOffsets) {
Index stride = m_base_mapper.stride();
m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride);
}
}
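  // Worked equation (illustrative): in a ColMajor buffer, coefficient
  // (i + vert_offset, j + horiz_offset) lives at offset
  //   (vert_offset + horiz_offset * stride) + i + j * stride,
  // so baking vert_offset + horiz_offset * stride into the base pointer once
  // lets every later access use plain (i, j) indexing.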
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
if (UseDirectOffsets) {
return m_base_mapper(i, 0);
}
return m_base_mapper(i + m_vert_offset, m_horiz_offset);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const {
if (UseDirectOffsets) {
return m_base_mapper(i, j);
}
return m_base_mapper(i + m_vert_offset, j + m_horiz_offset);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
if (UseDirectOffsets) {
return m_base_mapper.template loadPacket<Alignment>(i, 0);
}
return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
if (UseDirectOffsets) {
return m_base_mapper.template loadPacket<Alignment>(i, j);
}
return m_base_mapper.template loadPacket<Alignment>(i + m_vert_offset, j + m_horiz_offset);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
if (UseDirectOffsets) {
return m_base_mapper.template loadHalfPacket<Alignment>(i, 0);
}
return m_base_mapper.template loadHalfPacket<Alignment>(i + m_vert_offset, m_horiz_offset);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, Packet p) const {
    if (UseDirectOffsets) {
      m_base_mapper.storePacket(i, 0, p);
      return;  // otherwise the packet would be stored a second time, at the wrong location
    }
m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
if (UseDirectOffsets) {
return LinearMapper(m_base_mapper, i, j);
}
return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset);
}
template <typename PacketT, int AlignmentType>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const {
EIGEN_STATIC_ASSERT((internal::is_same<PacketT, Packet>::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? Aligned : Unaligned;
if (UseDirectOffsets) {
return m_base_mapper.template loadPacket<ActualAlignment>(i, 0);
}
return m_base_mapper.template loadPacket<ActualAlignment>(i + m_vert_offset, m_horiz_offset);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const {
return false;
}
private:
ParentMapper m_base_mapper;
const Index m_vert_offset;
const Index m_horiz_offset;
};
template<typename Scalar_, typename Index, int side,
typename Tensor,
typename nocontract_t, typename contract_t,
int packet_size,
bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
class TensorContractionInputMapper
: public BaseTensorContractionMapper<Scalar_, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> {
public:
typedef Scalar_ Scalar;
typedef BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> Base;
typedef TensorContractionSubMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous, inner_dim_reordered, Alignment> SubMapper;
typedef SubMapper VectorMapper;
EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor,
const nocontract_t& nocontract_strides,
const nocontract_t& ij_strides,
const contract_t& contract_strides,
const contract_t& k_strides)
: Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const {
return SubMapper(*this, i, j);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
return VectorMapper(*this, i, j);
}
};
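// Usage sketch (illustrative, hypothetical indices): the gemm packing routines
// walk an operand through shifted sub-mappers, e.g.
//   SubMapper sub = mapper.getSubMapper(i0, k0);  // view shifted by (i0, k0)
//   Scalar s = sub(3, 2);                         // == mapper(i0 + 3, k0 + 2)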
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H
// ============================================================================
// abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
// ============================================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
// evaluator for thread pool device
#ifdef EIGEN_USE_THREADS
namespace Eigen {
#ifdef EIGEN_USE_SIMPLE_THREAD_POOL
namespace internal {
template<typename LhsScalar, typename LhsMapper, typename Index>
struct packLhsArg {
LhsScalar* blockA;
const LhsMapper& lhs;
const Index m_start;
const Index k_start;
const Index mc;
const Index kc;
};
template<typename LhsScalar, typename RhsScalar, typename RhsMapper, typename OutputMapper, typename Index>
struct packRhsAndKernelArg {
const MaxSizeVector<LhsScalar*>* blockAs;
RhsScalar* blockB;
const RhsMapper& rhs;
OutputMapper& output;
const Index m;
const Index k;
const Index n;
const Index mc;
const Index kc;
const Index nc;
const Index num_threads;
const Index num_blockAs;
const Index max_m;
const Index k_block_idx;
const Index m_block_idx;
const Index n_block_idx;
const Index m_blocks;
const Index n_blocks;
MaxSizeVector<Notification*>* kernel_notifications;
const MaxSizeVector<Notification*>* lhs_notifications;
const bool need_to_pack;
};
} // end namespace internal
#endif // EIGEN_USE_SIMPLE_THREAD_POOL
template<typename Indices, typename LeftArgType, typename RightArgType>
struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> :
public TensorContractionEvaluatorBase<TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, ThreadPoolDevice> > {
typedef ThreadPoolDevice Device;
typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType>, Device> Self;
typedef TensorContractionEvaluatorBase<Self> Base;
typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
};
// Most of the code is assuming that both input tensors are ColMajor. If the
// inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
// will pretend B is LHS and A is RHS.
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
typedef typename internal::conditional<
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
static const int LDims =
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
static const int RDims =
internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
static const int ContractDims = internal::array_size<Indices>::value;
typedef array<Index, LDims> left_dim_mapper_t;
typedef array<Index, RDims> right_dim_mapper_t;
typedef array<Index, ContractDims> contract_t;
typedef array<Index, LDims - ContractDims> left_nocontract_t;
typedef array<Index, RDims - ContractDims> right_nocontract_t;
static const int NumDims = LDims + RDims - 2 * ContractDims;
typedef DSizes<Index, NumDims> Dimensions;
// typedefs needed in evalTo
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
TensorEvaluator(const XprType& op, const Device& device) :
Base(op, device) {}
#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
bool rhs_inner_dim_reordered, int Alignment>
void evalProduct(Scalar* buffer) const {
typedef
typename internal::remove_const<typename EvalLeftArgType::Scalar>::type
LhsScalar;
typedef
typename internal::remove_const<typename EvalRightArgType::Scalar>::type
RhsScalar;
typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
typedef internal::TensorContractionInputMapper<
LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
contract_t, internal::packet_traits<LhsScalar>::size,
lhs_inner_dim_contiguous, false, Unaligned>
LhsMapper;
typedef internal::TensorContractionInputMapper<
RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
contract_t, internal::packet_traits<RhsScalar>::size,
rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned>
RhsMapper;
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
typedef internal::gemm_pack_lhs<LhsScalar, Index,
typename LhsMapper::SubMapper, Traits::mr,
Traits::LhsProgress, ColMajor>
LhsPacker;
typedef internal::gemm_pack_rhs<
RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
RhsPacker;
typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
Traits::mr, Traits::nr, false, false>
GebpKernel;
const Index m = this->m_i_size;
const Index n = this->m_j_size;
const Index k = this->m_k_size;
if (m == 0 || n == 0 || k == 0) return;
// Compute a set of algorithm parameters:
// - kernel block sizes (bm, bn, bk)
// - task grain sizes (number of kernels executed per task: gm, gn)
// - number of threads
// - sharding by row/column
// - parallel packing or first lhs then rhs
// and some derived parameters:
// - number of tasks (nm, nn, nk)
// - number of kernels (nm0, nn0)
// Unfortunately, all these parameters are tightly interdependent.
// So in some cases we first compute approximate values, then compute other
// values based on these approximations and then refine the approximations.
// There are lots of heuristics here. There is some reasoning behind them,
// but ultimately they are just tuned on contraction benchmarks for
// different input configurations, thread counts and instruction sets.
// So feel free to question any of them.
// Compute whether we want to shard by row or by column.
// This is a first approximation, it will be refined later. Since we don't
    // know the number of threads yet we use 2, because what we are most
// interested in at this point is whether it makes sense to use
// parallelization at all or not.
bool shard_by_col = shardByCol(m, n, 2);
// First approximation of kernel blocking sizes.
// Again, we don't know number of threads yet, so we use 2.
Index bm, bn, bk;
if (shard_by_col) {
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
internal::ShardByCol>
blocking(k, m, n, 2);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
} else {
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
internal::ShardByRow>
blocking(k, m, n, 2);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
}
// Compute optimal number of threads.
// Note: we use bk instead of k here because we are interested in amount of
// _parallelizable_ computations, and computations are not parallelizable
// across k dimension.
const TensorOpCost cost =
contractionCost(m, n, bm, bn, bk, shard_by_col, false);
int num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
static_cast<double>(n) * m, cost, this->m_device.numThreads());
// TODO(dvyukov): this is a stop-gap to prevent regressions while the cost
// model is not tuned. Remove this when the cost model is tuned.
if (n == 1) num_threads = 1;
if (num_threads == 1) {
// The single-threaded algorithm should be faster in this case.
if (n == 1)
this->template evalGemv<lhs_inner_dim_contiguous,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Alignment>(buffer);
else
this->template evalGemm<lhs_inner_dim_contiguous,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Alignment>(buffer);
return;
}
// Now that we know number of threads, recalculate sharding and blocking.
shard_by_col = shardByCol(m, n, num_threads);
if (shard_by_col) {
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
internal::ShardByCol>
blocking(k, m, n, num_threads);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
} else {
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index,
internal::ShardByRow>
blocking(k, m, n, num_threads);
bm = blocking.mc();
bn = blocking.nc();
bk = blocking.kc();
}
// Number of kernels for each dimension.
Index nm0 = divup(m, bm);
Index nn0 = divup(n, bn);
Index nk = divup(k, bk);
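    // Worked numbers (illustrative): m = 1000 with bm = 96 gives
    // nm0 = divup(1000, 96) = 11 kernels along m; a grain of gm = 3 then yields
    // nm = divup(11, 3) = 4 tasks, the last one covering only 11 - 3*3 = 2
    // kernels.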
// Calculate task grain size (number of kernels executed per task).
// This task size coarsening serves two purposes:
// 1. It reduces per-task overheads including synchronization overheads.
    // 2. It allows better use of caches (reuse of the same packed rhs in
    //    several consecutive kernels).
Index gm = 1;
Index gn = 1;
// If we are sharding by column, then we prefer to reduce rows first.
if (shard_by_col) {
gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
} else {
gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
}
// Number of tasks in each dimension.
Index nm = divup(nm0, gm);
Index nn = divup(nn0, gn);
    // Last but not least, decide whether we want to issue both lhs and rhs
    // packing in parallel; or issue lhs packing first, and then issue rhs
    // packing when lhs packing completes (for !shard_by_col lhs and rhs are
    // swapped). Parallel packing allows more parallelism (for both packing and
    // kernels), while sequential packing provides better locality (once
    // a thread finishes rhs packing it proceeds to kernels with that rhs).
// First, we are interested in parallel packing if there are few tasks.
bool parallel_pack = num_threads >= nm * nn;
// Also do parallel packing if all data fits into L2$.
if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <=
l2CacheSize() * num_threads)
parallel_pack = true;
// But don't do it if we will use each rhs only once. Locality seems to be
// more important in this case.
if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
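    // Worked check (illustrative, assuming a 256 KB L2 per thread): for
    // m = n = 1024, bk = 256 and float scalars on 8 threads, packed data is
    // (1024 + 1024) * 256 * 4 = 2 MB, which equals 8 * 256 KB, so the L2 test
    // passes and packing is issued in parallel.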
LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides,
this->m_i_strides, this->m_left_contracting_strides,
this->m_k_strides);
RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides,
this->m_j_strides, this->m_right_contracting_strides,
this->m_k_strides);
Context<LhsPacker, RhsPacker, GebpKernel, LhsMapper, RhsMapper,
OutputMapper>(this->m_device, num_threads, lhs, rhs, buffer, m, n,
k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0,
shard_by_col, parallel_pack)
.run();
}
// Context coordinates a single parallel gemm operation.
template <typename LhsPacker, typename RhsPacker, typename GebpKernel,
typename LhsMapper, typename RhsMapper, typename OutputMapper>
class Context {
public:
Context(const Device& device, int num_threads, LhsMapper& lhs,
RhsMapper& rhs, Scalar* buffer, Index tm, Index tn, Index tk, Index bm,
Index bn, Index bk, Index nm, Index nn, Index nk, Index gm,
Index gn, Index nm0, Index nn0, bool shard_by_col,
bool parallel_pack)
: device_(device),
lhs_(lhs),
rhs_(rhs),
buffer_(buffer),
output_(buffer, tm),
num_threads_(num_threads),
shard_by_col_(shard_by_col),
parallel_pack_(parallel_pack),
m_(tm),
n_(tn),
k_(tk),
bm_(bm),
bn_(bn),
bk_(bk),
nm_(nm),
nn_(nn),
nk_(nk),
gm_(gm),
gn_(gn),
nm0_(nm0),
nn0_(nn0)
{
for (Index x = 0; x < P; x++) {
// Normal number of notifications for k slice switch is
// nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only
// nm_ + nn_ notifications, because they will not receive notifications
        // from preceding kernels.
state_switch_[x] =
x == 0
? 1
: (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) +
(x == P - 1 ? nm_ * nn_ : 0);
state_packing_ready_[x] =
parallel_pack_ ? 0 : (shard_by_col_ ? nm_ : nn_);
state_kernel_[x] = new std::atomic<uint8_t>*[nm_];
for (Index m = 0; m < nm_; m++) {
state_kernel_[x][m] = new std::atomic<uint8_t>[nn_];
// Kernels generally receive 3 notifications (previous kernel + 2
// packing), but the first slice won't get notifications from previous
// kernels.
for (Index n = 0; n < nn_; n++)
state_kernel_[x][m][n].store(
(x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1),
std::memory_order_relaxed);
}
}
// Allocate memory for packed rhs/lhs matrices.
size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
size_t lhs_size =
divup<size_t>(bm_ * bk_ * sizeof(LhsScalar), align) * align;
size_t rhs_size =
divup<size_t>(bn_ * bk_ * sizeof(RhsScalar), align) * align;
packed_mem_ = static_cast<char*>(internal::aligned_malloc(
(nm0_ * lhs_size + nn0_ * rhs_size) * std::min<size_t>(nk_, P - 1)));
char* mem = static_cast<char*>(packed_mem_);
for (Index x = 0; x < numext::mini<Index>(nk_, P - 1); x++) {
packed_lhs_[x].resize(nm0_);
for (Index m = 0; m < nm0_; m++) {
packed_lhs_[x][m] = reinterpret_cast<LhsScalar*>(mem);
mem += lhs_size;
}
packed_rhs_[x].resize(nn0_);
for (Index n = 0; n < nn0_; n++) {
packed_rhs_[x][n] = reinterpret_cast<RhsScalar*>(mem);
mem += rhs_size;
}
}
}
~Context() {
for (Index x = 0; x < P; x++) {
for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m];
delete[] state_kernel_[x];
}
internal::aligned_free(packed_mem_);
}
void run() {
// Kick off packing of the first slice.
signal_switch(0, 1);
// Wait for overall completion.
// TODO(dvyukov): this wait can lead to deadlock.
// If nthreads contractions are concurrently submitted from worker
// threads, this wait will block all worker threads and the system will
// deadlock.
done_.Wait();
}
private:
Notification done_;
const Device& device_;
LhsMapper& lhs_;
RhsMapper& rhs_;
Scalar* const buffer_;
OutputMapper output_;
const int num_threads_;
const bool shard_by_col_;
const bool parallel_pack_;
// Matrix sizes.
const Index m_;
const Index n_;
const Index k_;
// Block sizes.
const Index bm_;
const Index bn_;
const Index bk_;
// Number of tasks.
const Index nm_;
const Index nn_;
const Index nk_;
// Task grain sizes (number of kernels executed per task).
const Index gm_;
const Index gn_;
    // Number of blocks (this is different from nm_/nn_ because of task size
    // coarsening).
const Index nm0_;
const Index nn0_;
// Parallelization strategy.
//
// Blocks related to the same k block can run in parallel because they write
// to different output blocks. So we parallelize within k slices, this
// gives us parallelism level of m x n. Before we can start any kernels
// related to k-th slice, we need to issue m lhs packing tasks and n rhs
// packing tasks.
//
// However, there is a bottleneck when we are finishing kernels for k-th
// slice (at the very end there is only 1 runnable kernel). To mitigate this
// bottleneck we allow kernels from k-th and k+1-th slices to run in
// parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same
// output block, so they must not run in parallel.
//
// This gives us the following dependency graph.
    // On each k slice we have m x n kernel tasks, m lhs packing tasks and n rhs
// packing tasks.
// Kernel (m, n, k) can start when:
// - kernel (m, n, k-1) has finished
// - lhs packing (m, k) has finished
// - rhs packing (n, k) has finished
// Lhs/rhs packing can start when:
// - all k-1 packing has finished (artificially imposed to limit amount of
// parallel packing)
//
// On top of that we limit runnable tasks to two consecutive k slices.
// This is done to limit amount of memory we need for packed lhs/rhs
// (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_).
//
// state_switch_ tracks when we are ready to switch to the next k slice.
// state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n).
    // These variables are rolling over 3 consecutive k slices: first two we are
// actively executing + one to track completion of kernels in the second
// slice.
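    // Worked counts (illustrative, parallel packing case): each kernel (m, n, k)
    // waits on 3 notifications (previous kernel + lhs pack + rhs pack), and a
    // slice switch waits on nm_ + nn_ + nm_ * nn_ events (all packing plus all
    // kernels of the slice); slice 0 has no predecessor kernels, which is the
    // x == 0 special case handled in the constructor.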
static const Index P = 3;
void* packed_mem_;
std::vector<LhsScalar*> packed_lhs_[P - 1];
std::vector<RhsScalar*> packed_rhs_[P - 1];
std::atomic<uint8_t>** state_kernel_[P];
// state_switch_ is frequently modified by worker threads, while other
// fields are read-only after constructor. Let's move it to a separate cache
// line to reduce cache-coherency traffic.
char pad_[128];
std::atomic<Index> state_packing_ready_[P];
std::atomic<Index> state_switch_[P];
void pack_lhs(Index m, Index k) {
const Index mend = m * gm_ + gm(m);
for (Index m1 = m * gm_; m1 < mend; m1++)
LhsPacker()(packed_lhs_[k % (P - 1)][m1],
lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1));
if (!parallel_pack_ && shard_by_col_) {
signal_packing(k);
} else {
signal_switch(k + 1);
for (Index n = nn_ - 1; n >= 0; n--) signal_kernel(m, n, k, n == 0);
}
}
void pack_rhs(Index n, Index k) {
const Index nend = n * gn_ + gn(n);
for (Index n1 = n * gn_; n1 < nend; n1++) {
if (k == 0) {
// Zero the output memory in parallel.
          // On a 10000x2x10000 mm, zeroing can easily take half of the time.
// Zero (bn x m) row. Safe to do here because all kernels that will
// write to this memory depend on completion of this task.
// Note: don't call device_.memset() here. device_.memset() blocks on
// thread pool worker thread, which can lead to underutilization and
// deadlocks.
memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar));
}
RhsPacker()(packed_rhs_[k % (P - 1)][n1],
rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1));
}
if (parallel_pack_ || shard_by_col_) {
signal_switch(k + 1);
for (Index m = nm_ - 1; m >= 0; m--) signal_kernel(m, n, k, m == 0);
} else {
signal_packing(k);
}
}
void kernel(Index m, Index n, Index k) {
// Note: order of iteration matters here. Iteration over m is innermost
      // because we want to reuse the same packed rhs in consecutive tasks
// (rhs fits into L2$ while lhs only into L3$).
const Index nend = n * gn_ + gn(n);
const Index mend = m * gm_ + gm(m);
if (shard_by_col_) {
for (Index n1 = n * gn_; n1 < nend; n1++) {
for (Index m1 = m * gm_; m1 < mend; m1++)
GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
packed_lhs_[k % (P - 1)][m1],
packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
Scalar(1), -1, -1, 0, 0);
}
} else {
for (Index m1 = m * gm_; m1 < mend; m1++)
for (Index n1 = n * gn_; n1 < nend; n1++) {
GebpKernel()(output_.getSubMapper(m1 * bm_, n1 * bn_),
packed_lhs_[k % (P - 1)][m1],
packed_rhs_[k % (P - 1)][n1], bm(m1), bk(k), bn(n1),
Scalar(1), -1, -1, 0, 0);
}
}
signal_kernel(m, n, k + 1, false);
signal_switch(k + 2);
}
void signal_packing(Index k) {
eigen_assert(!parallel_pack_);
Index s = state_packing_ready_[k % P].fetch_sub(1);
eigen_assert(s > 0);
if (s != 1) return;
state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_;
enqueue_packing(k, shard_by_col_);
}
void signal_kernel(Index m, Index n, Index k, bool sync) {
std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
Index s = state->load();
eigen_assert(s > 0);
if (s != 1 && state->fetch_sub(1) != 1) return;
state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed);
if (sync)
kernel(m, n, k);
else
device_.enqueueNoNotification([=]() { kernel(m, n, k); });
}
void signal_switch(Index k, Index v = 1) {
Index s = state_switch_[k % P].fetch_sub(v);
eigen_assert(s >= v);
if (s != v) return;
// Ready to switch to the next k slice.
// Reset counter for the next iteration.
state_switch_[k % P] =
(parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) +
nm_ * nn_;
if (k < nk_) {
// Issue lhs/rhs packing. Their completion will in turn kick off
// kernels.
if (parallel_pack_) {
enqueue_packing(k, !shard_by_col_);
enqueue_packing(k, shard_by_col_);
} else if (shard_by_col_) {
enqueue_packing(k, false);
} else {
enqueue_packing(k, true);
}
// Termination handling.
// Because kernel completion signals k + 2 switch, we need to finish nk
// + 2 slices without issuing any tasks on nk + 1 slice. So here we
// pretend that all nk + 1 packing tasks just finish instantly; so that
// nk + 2 switch only waits for completion of nk kernels.
} else if (k == nk_) {
signal_switch(k + 1,
parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_));
} else {
done_.Notify();
}
}
// Enqueue all rhs/lhs packing for k-th slice.
void enqueue_packing(Index k, bool rhs) {
enqueue_packing_helper(0, rhs ? nn_ : nm_, k, rhs);
}
void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) {
if (end - start == 1) {
if (rhs)
pack_rhs(start, k);
else
pack_lhs(start, k);
} else {
Index mid = (start + end) / 2;
device_.enqueueNoNotification(
[=]() { enqueue_packing_helper(mid, end, k, rhs); });
device_.enqueueNoNotification(
[=]() { enqueue_packing_helper(start, mid, k, rhs); });
}
}
// Block sizes with accounting for potentially incomplete last block.
Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; }
Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; }
Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; }
// Task grain sizes accounting for potentially incomplete last task.
Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; }
Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; }
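    // Worked numbers (illustrative): bm(m) for the last block reduces to
    // m_ - bm_ * (nm0_ - 1); e.g. m_ = 100 and bm_ = 32 give nm0_ = 4 and a
    // final block of 100 + 32 - 128 = 4 rows.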
Context(const Context&) = delete;
void operator=(const Context&) = delete;
};
// Decide whether we want to shard m x n contraction by columns or by rows.
static bool shardByCol(Index m, Index n, Index num_threads) {
// Note: we are comparing both n and m against Traits::nr, it is not
// a mistake. We are trying to figure out how both n and m will fit into
// the main sharding dimension.
// Sharding by column is the default
// ... unless there is enough data for vectorization over rows
if (m / num_threads >= Traits::nr &&
// and not enough data for vectorization over columns
(n / num_threads < Traits::nr ||
// ... or barely enough data for vectorization over columns,
// but it is not evenly dividable across threads
(n / num_threads < 4 * Traits::nr &&
(n % (num_threads * Traits::nr)) != 0 &&
// ... and it is evenly dividable across threads for rows
((m % (num_threads * Traits::nr)) == 0 ||
// .. or it is not evenly dividable for both dimensions but
// there is much more data over rows so that corner effects are
// mitigated.
(m / n >= 6)))))
return false;
    // Also shard by row when the matrix is strongly elongated over the m
    // dimension relative to n.
if (n / num_threads < 16 * Traits::nr && m > n * 32) return false;
return true;
}
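  // Worked numbers (illustrative, assuming Traits::nr == 4): for m = 10000,
  // n = 64 on 8 threads, n / threads = 8 < 16 * nr and m > 32 * n, so the last
  // test fires and we shard by row; a square 1024x1024 problem falls through
  // both tests and shards by column.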
Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn,
int num_threads, bool shard_by_col) const {
Index gm = 1;
Index gm1 = 1;
Index nm0 = divup(m, bm);
Index nm1 = nm0;
for (;;) {
// Find the next candidate for m grain size. It needs to result in
// different number of blocks. E.g. if we have 10 kernels, we want to try
// 5 and 10, but not 6, 7, 8 and 9.
while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++;
if (gm1 > nm0) break;
// Check the candidate.
int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads,
shard_by_col);
if (res < 0) break;
nm1 = divup(nm0, gm1);
if (res == 0) continue;
// Commit new grain size.
gm = gm1;
}
return gm;
}
Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm,
int num_threads, bool shard_by_col) const {
Index gn = 1;
Index gn1 = 1;
Index nn0 = divup(n, bn);
Index nn1 = nn0;
for (;;) {
while (gn1 <= nn0 && nn1 == divup(nn0, gn1)) gn1++;
if (gn1 > nn0) break;
int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads,
shard_by_col);
if (res < 0) break;
nn1 = divup(nn0, gn1);
if (res == 0) continue;
gn = gn1;
}
return gn;
}
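  // Worked enumeration (illustrative): starting from the default grain of 1,
  // for nn0 = 10 kernels the loop above tries the candidates 2, 3, 4, 5 and 10
  // - the smallest grains producing the distinct task counts 5, 4, 3, 2 and 1 -
  // and skips 6..9, which would all repeat the 2-task split.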
// checkGrain checks whether grain (gm, gn) is suitable and is better than
// (oldgm, oldgn).
int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm,
Index gn, Index oldgm, Index oldgn, int num_threads,
bool shard_by_col) const {
const TensorOpCost cost =
contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true);
double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(
static_cast<double>(bm) * gm * bn * gn, cost);
    // If the resulting tasks are still too small, accept the larger grain
    // regardless of anything else; otherwise synchronization overheads will dominate.
if (taskSize < 1) return 1;
// If it is too large, then we reject it and all larger tasks.
if (taskSize > 2) return -1;
// Now we are in presumably good task size range.
// The main deciding factor here is parallelism. Consider that we have 12
// kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes.
// But 2/4 yield 6/3 tasks, which gives us parallelism of 0.75 (at most 3/4
// of cores will be busy). While grain size 3 gives us 4 tasks, which gives
// us parallelism of 1 (we can load all cores).
Index nm0 = divup(m, bm);
Index nn0 = divup(n, bn);
Index new_tasks = divup(nm0, gm) * divup(nn0, gn);
double new_parallelism = static_cast<double>(new_tasks) /
(divup<int>(new_tasks, num_threads) * num_threads);
Index old_tasks = divup(nm0, oldgm) * divup(nn0, oldgn);
double old_parallelism = static_cast<double>(old_tasks) /
(divup<int>(old_tasks, num_threads) * num_threads);
if (new_parallelism > old_parallelism || new_parallelism == 1) return 1;
return 0;
}
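  // Worked parallelism (illustrative): for 12 kernels on 4 threads, a grain of
  // 2 gives 6 tasks and parallelism 6 / (divup(6, 4) * 4) = 0.75, while a grain
  // of 3 gives 4 tasks and parallelism 4 / (divup(4, 4) * 4) = 1.0, so the
  // grain of 3 is committed.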
#else // EIGEN_USE_SIMPLE_THREAD_POOL
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
void evalProduct(Scalar* buffer) const {
if (this->m_j_size == 1) {
this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
return;
}
evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(buffer);
}
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
void evalGemm(Scalar* buffer) const {
// columns in left side, rows in right side
const Index k = this->m_k_size;
// rows in left side
const Index m = this->m_i_size;
// columns in right side
const Index n = this->m_j_size;
    // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar))
this->m_device.memset(buffer, 0, m * n * sizeof(Scalar));
const int lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
const int rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
LeftEvaluator, left_nocontract_t,
contract_t, lhs_packet_size,
lhs_inner_dim_contiguous,
false, Unaligned> LhsMapper;
typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs,
RightEvaluator, right_nocontract_t,
contract_t, rhs_packet_size,
rhs_inner_dim_contiguous,
rhs_inner_dim_reordered, Unaligned> RhsMapper;
typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
// TODO: packing could be faster sometimes if we supported row major tensor mappers
typedef internal::gemm_pack_lhs<LhsScalar, Index, typename LhsMapper::SubMapper, Traits::mr,
Traits::LhsProgress, ColMajor> LhsPacker;
typedef internal::gemm_pack_rhs<RhsScalar, Index, typename RhsMapper::SubMapper, Traits::nr, ColMajor> RhsPacker;
// TODO: replace false, false with conjugate values?
typedef internal::gebp_kernel<LhsScalar, RhsScalar, Index, OutputMapper,
Traits::mr, Traits::nr, false, false> GebpKernel;
typedef internal::packLhsArg<LhsScalar, LhsMapper, Index> packLArg;
typedef internal::packRhsAndKernelArg<LhsScalar, RhsScalar, RhsMapper, OutputMapper, Index> packRKArg;
// initialize data mappers
LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
this->m_left_contracting_strides, this->m_k_strides);
RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
this->m_right_contracting_strides, this->m_k_strides);
OutputMapper output(buffer, m);
// compute block sizes (which depend on number of threads)
const Index num_threads = this->m_device.numThreads();
internal::TensorContractionBlocking<LhsMapper, RhsMapper, Index, internal::ShardByCol> blocking(k, m, n, num_threads);
Index mc = blocking.mc();
Index nc = blocking.nc();
Index kc = blocking.kc();
eigen_assert(mc <= m);
eigen_assert(nc <= n);
eigen_assert(kc <= k);
#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
const Index k_blocks = CEIL_DIV(k, kc);
const Index n_blocks = CEIL_DIV(n, nc);
const Index m_blocks = CEIL_DIV(m, mc);
const Index sizeA = mc * kc;
const Index sizeB = kc * nc;
/* cout << "m: " << m << " n: " << n << " k: " << k << endl;
cout << "mc: " << mc << " nc: " << nc << " kc: " << kc << endl;
cout << "m_blocks: " << m_blocks << " n_blocks: " << n_blocks << " k_blocks: " << k_blocks << endl;
cout << "num threads: " << num_threads << endl;
*/
// note: m_device.allocate should return 16 byte aligned pointers, but if blockA and blockB
// aren't 16 byte aligned segfaults will happen due to SIMD instructions
    // note: You can get away with allocating just a single blockA and offsets and meet
    //       the alignment requirements with the assumption that
// (Traits::mr * sizeof(ResScalar)) % 16 == 0
const Index numBlockAs = numext::mini(num_threads, m_blocks);
MaxSizeVector<LhsScalar *> blockAs(num_threads);
for (int i = 0; i < num_threads; i++) {
blockAs.push_back(static_cast<LhsScalar *>(this->m_device.allocate(sizeA * sizeof(LhsScalar))));
}
// To circumvent alignment issues, I'm just going to separately allocate the memory for each thread
// TODO: is this too much memory to allocate? This simplifies coding a lot, but is wasteful.
// Other options: (1) reuse memory when a thread finishes. con: tricky
// (2) allocate block B memory in each thread. con: overhead
MaxSizeVector<RhsScalar *> blockBs(n_blocks);
for (int i = 0; i < n_blocks; i++) {
blockBs.push_back(static_cast<RhsScalar *>(this->m_device.allocate(sizeB * sizeof(RhsScalar))));
}
// lhs_notifications starts with all null Notifications
MaxSizeVector<Notification*> lhs_notifications(num_threads, nullptr);
// this should really be numBlockAs * n_blocks;
const Index num_kernel_notifications = num_threads * n_blocks;
MaxSizeVector<Notification*> kernel_notifications(num_kernel_notifications,
nullptr);
for (Index k_block_idx = 0; k_block_idx < k_blocks; k_block_idx++) {
const Index k_start = k_block_idx * kc;
// make sure we don't overshoot the right edge of the left matrix
const Index actual_kc = numext::mini(k_start + kc, k) - k_start;
for (Index m_block_idx = 0; m_block_idx < m_blocks; m_block_idx += numBlockAs) {
const Index num_blocks = numext::mini(m_blocks-m_block_idx, numBlockAs);
for (Index mt_block_idx = m_block_idx; mt_block_idx < m_block_idx+num_blocks; mt_block_idx++) {
const Index m_start = mt_block_idx * mc;
const Index actual_mc = numext::mini(m_start + mc, m) - m_start;
eigen_assert(actual_mc > 0);
Index blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads;
for (int i = 0; i < n_blocks; ++i) {
Index notification_id = (blockAId * n_blocks + i);
// Wait for any current kernels using this slot to complete
// before using it.
if (kernel_notifications[notification_id]) {
wait_until_ready(kernel_notifications[notification_id]);
delete kernel_notifications[notification_id];
}
kernel_notifications[notification_id] = new Notification();
}
const packLArg arg = {
blockAs[blockAId], // blockA
lhs, // lhs
m_start, // m
k_start, // k
actual_mc, // mc
actual_kc, // kc
};
// Delete any existing notification since we may be
// replacing it. The algorithm should ensure that there are
// no existing waiters on this notification.
delete lhs_notifications[blockAId];
lhs_notifications[blockAId] =
this->m_device.enqueue(&Self::packLhs<packLArg, LhsPacker>, arg);
}
// now start kernels.
const Index m_base_start = m_block_idx * mc;
const bool need_to_pack = m_block_idx == 0;
for (Index n_block_idx = 0; n_block_idx < n_blocks; n_block_idx++) {
const Index n_start = n_block_idx * nc;
const Index actual_nc = numext::mini(n_start + nc, n) - n_start;
// first make sure the previous kernels are all done before overwriting rhs. Also wait if
// we're going to start a new k. In both cases need_to_pack is true.
if (need_to_pack) {
for (Index i = num_blocks; i < num_threads; ++i) {
Index blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads;
Index future_id = (blockAId * n_blocks + n_block_idx);
wait_until_ready(kernel_notifications[future_id]);
}
}
packRKArg arg = {
&blockAs, // blockA
blockBs[n_block_idx], // blockB
rhs, // rhs
output, // output
m_base_start, // m
k_start, // k
n_start, // n
mc, // mc
actual_kc, // kc
actual_nc, // nc
num_threads,
numBlockAs,
m,
k_block_idx,
m_block_idx,
n_block_idx, // n_block_idx
m_blocks, // m_blocks
n_blocks, // n_blocks
&kernel_notifications, // kernel notifications
&lhs_notifications, // lhs notifications
need_to_pack, // need_to_pack
};
// We asynchronously kick off this function, which ends up
// notifying the appropriate kernel_notifications objects,
// which this thread waits on before exiting.
this->m_device.enqueueNoNotification(&Self::packRhsAndKernel<packRKArg, RhsPacker, GebpKernel>, arg);
}
}
}
// Make sure all the kernels are done.
for (size_t i = 0; i < kernel_notifications.size(); ++i) {
wait_until_ready(kernel_notifications[i]);
delete kernel_notifications[i];
}
// No need to wait for lhs notifications since they should have
// already been waited on. Just clean them up.
for (size_t i = 0; i < lhs_notifications.size(); ++i) {
delete lhs_notifications[i];
}
// deallocate all of the memory for both the A and B blocks
for (size_t i = 0; i < blockAs.size(); i++) {
this->m_device.deallocate(blockAs[i]);
}
for (size_t i = 0; i < blockBs.size(); i++) {
this->m_device.deallocate(blockBs[i]);
}
#undef CEIL_DIV
}
/*
* Packs a LHS block of size (mt, kc) starting at lhs(m, k). Before packing
* the LHS block, check that all of the kernels that worked on the same
* mt_block_idx in the previous m_block are done.
*/
template <typename packLArg, typename LhsPacker>
static void packLhs(const packLArg arg) {
// perform actual packing
LhsPacker pack_lhs;
pack_lhs(arg.blockA, arg.lhs.getSubMapper(arg.m_start, arg.k_start), arg.kc, arg.mc);
}
/*
* Packs a RHS block of size (kc, nc) starting at (k, n) after checking that
* all kernels in the previous block are done.
* Then for each LHS future, we wait on the future and then call GEBP
* on the area packed by the future (which starts at
* blockA + future_idx * mt * kc) on the LHS and with the full packed
* RHS block.
* The output of this GEBP is written to output(m + i * mt, n).
*/
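/*
* Illustrative arithmetic for the layout described above (values made up):
* with mt = mc = 64 and kc = 32, the area packed by future i starts at
* blockA + i * 64 * 32 scalars and its GEBP result lands in output rows
* [m + i * 64, m + i * 64 + actual_mc). Note that this implementation gives
* each thread its own blockA allocation (see blockAs above) rather than
* offsetting into a single buffer.
*/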
template <typename packRKArg, typename RhsPacker, typename GebpKernel>
static void packRhsAndKernel(packRKArg arg) {
if (arg.need_to_pack) {
RhsPacker pack_rhs;
pack_rhs(arg.blockB, arg.rhs.getSubMapper(arg.k, arg.n), arg.kc, arg.nc);
}
GebpKernel gebp;
for (Index mt_block_idx = 0; mt_block_idx < arg.num_blockAs; mt_block_idx++) {
const Index m_base_start = arg.m + arg.mc*mt_block_idx;
if (m_base_start < arg.max_m) {
Index blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads;
wait_until_ready((*arg.lhs_notifications)[blockAId]);
const Index actual_mc = numext::mini(m_base_start + arg.mc, arg.max_m) - m_base_start;
gebp(arg.output.getSubMapper(m_base_start, arg.n),
(*arg.blockAs)[blockAId], arg.blockB,
actual_mc, arg.kc, arg.nc, Scalar(1), -1, -1, 0, 0);
// Notify that the kernel is done.
const Index set_idx = blockAId * arg.n_blocks + arg.n_block_idx;
(*arg.kernel_notifications)[set_idx]->Notify();
}
}
}
#endif // EIGEN_USE_SIMPLE_THREAD_POOL
TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk,
bool shard_by_col, bool prepacked) const {
const int packed_size = std::min<int>(PacketType<LhsScalar, Device>::size,
PacketType<RhsScalar, Device>::size);
const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
const double kd = static_cast<double>(bk);
// Peak VFMA bandwidth is 0.5. However, if we do not have enough data for
// vectorization, the bandwidth drops. The 4.0 and 2.0 bandwidth values were
// determined experimentally.
double computeBandwidth = bk == 1 ? 4.0 :
(shard_by_col ? bn : bm) < Traits::nr ||
(shard_by_col ? bm : bn) < Traits::mr ? 2.0 : 0.5;
#ifndef EIGEN_VECTORIZE_FMA
// Bandwidth of all of VFMA/MULPS/ADDPS is 0.5 on latest Intel processors.
// However, for MULPS/ADDPS we have a dependent sequence of 2 such instructions,
// so the overall bandwidth is 1.0.
if (computeBandwidth == 0.5) computeBandwidth = 1.0;
#endif
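// Illustrative reading (values made up): with bk == 64 and block shapes large
// enough for full vectorization, the compute term below is 64 * 0.5 == 32
// cycles per (packetized) output coefficient.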
// Computations.
TensorOpCost cost = TensorOpCost(0, 0, kd * computeBandwidth, true, packed_size);
// Output stores.
cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size);
if (prepacked) {
// Packing and kernels are executed in different tasks. When we calculate
// the task grain size we look only at the kernel cost, assuming that the
// kernel is more expensive than packing.
return cost;
}
// Lhs/rhs loads + computations.
TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n);
TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m);
// Lhs packing memory cost does not contribute considerably to overall
// execution time because lhs is prefetched early and accessed sequentially.
if (shard_by_col)
lhsCost.dropMemoryCost();
else
rhsCost.dropMemoryCost();
return cost + lhsCost + rhsCost;
}
};
} // end namespace Eigen
#endif // EIGEN_USE_THREADS
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
| 44,494 | 41.255461 | 142 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
namespace Eigen {
/** \class TensorConversionOp
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor conversion class. This class makes it possible to vectorize
* type casting operations when the number of scalars per packet differs
* between the source and the destination types.
*/
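// Illustrative usage sketch (added for exposition; the values are made up).
// The user-facing entry point is TensorBase::cast<NewType>(), which returns a
// TensorConversionOp:
// \code
// Eigen::Tensor<float, 2> a(4, 4);
// a.setRandom();
// Eigen::Tensor<double, 2> b = a.cast<double>();  // vectorized when possible
// \endcode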
namespace internal {
template<typename TargetType, typename XprType>
struct traits<TensorConversionOp<TargetType, XprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef TargetType Scalar;
typedef typename traits<XprType>::StorageKind StorageKind;
typedef typename traits<XprType>::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = traits<XprType>::Layout;
enum { Flags = 0 };
};
template<typename TargetType, typename XprType>
struct eval<TensorConversionOp<TargetType, XprType>, Eigen::Dense>
{
typedef const TensorConversionOp<TargetType, XprType>& type;
};
template<typename TargetType, typename XprType>
struct nested<TensorConversionOp<TargetType, XprType>, 1, typename eval<TensorConversionOp<TargetType, XprType> >::type>
{
typedef TensorConversionOp<TargetType, XprType> type;
};
} // end namespace internal
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio>
struct PacketConverter {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketConverter(const TensorEvaluator& impl)
: m_impl(impl) {}
template<int LoadMode, typename Index>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
}
private:
const TensorEvaluator& m_impl;
};
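// Illustrative reading of the ratio parameters (an assumed SSE example):
// casting double to float consumes two Packet2d per Packet4f produced, i.e.
// the <SrcCoeffRatio, TgtCoeffRatio> == <2, 1> specialization below, while
// casting float to double is the opposite <1, 2> case.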
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 2, 1> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketConverter(const TensorEvaluator& impl)
: m_impl(impl) {}
template<int LoadMode, typename Index>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
SrcPacket src1 = m_impl.template packet<LoadMode>(index);
SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2);
return result;
}
private:
const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 4, 1> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketConverter(const TensorEvaluator& impl)
: m_impl(impl) {}
template<int LoadMode, typename Index>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
SrcPacket src1 = m_impl.template packet<LoadMode>(index);
SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
SrcPacket src3 = m_impl.template packet<LoadMode>(index + 2 * SrcPacketSize);
SrcPacket src4 = m_impl.template packet<LoadMode>(index + 3 * SrcPacketSize);
TgtPacket result = internal::pcast<SrcPacket, TgtPacket>(src1, src2, src3, src4);
return result;
}
private:
const TensorEvaluator& m_impl;
};
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, 2> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketConverter(const TensorEvaluator& impl)
: m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {}
template<int LoadMode, typename Index>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
// Only call m_impl.packet() when we have direct access to the underlying data. This
// ensures that we don't compute the subexpression twice. We may, however, load some
// coefficients twice, but in practice this doesn't negatively impact performance.
if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) {
// Force unaligned memory loads since we can't ensure alignment anymore
return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<Unaligned>(index));
} else {
const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size;
typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
typedef typename internal::unpacket_traits<TgtPacket>::type TgtType;
internal::scalar_cast_op<SrcType, TgtType> converter;
EIGEN_ALIGN_MAX typename internal::unpacket_traits<TgtPacket>::type values[TgtPacketSize];
for (int i = 0; i < TgtPacketSize; ++i) {
values[i] = converter(m_impl.coeff(index+i));
}
TgtPacket rslt = internal::pload<TgtPacket>(values);
return rslt;
}
}
private:
const TensorEvaluator& m_impl;
const typename TensorEvaluator::Index m_maxIndex;
};
template<typename TargetType, typename XprType>
class TensorConversionOp : public TensorBase<TensorConversionOp<TargetType, XprType>, ReadOnlyAccessors>
{
public:
typedef typename internal::traits<TensorConversionOp>::Scalar Scalar;
typedef typename internal::traits<TensorConversionOp>::StorageKind StorageKind;
typedef typename internal::traits<TensorConversionOp>::Index Index;
typedef typename internal::nested<TensorConversionOp>::type Nested;
typedef Scalar CoeffReturnType;
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr)
: m_xpr(xpr) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
};
template <bool SameType, typename Eval, typename Scalar> struct ConversionSubExprEval {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar*) {
impl.evalSubExprsIfNeeded(NULL);
return true;
}
};
template <typename Eval, typename Scalar> struct ConversionSubExprEval<true, Eval, Scalar> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Eval& impl, Scalar* data) {
return impl.evalSubExprsIfNeeded(data);
}
};
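// The two ConversionSubExprEval specializations above select the evaluation
// strategy: when the source and target scalar types already match, the
// sub-expression may evaluate directly into the destination buffer; otherwise
// it is evaluated without the buffer and each coefficient is converted on
// access.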
// Eval as rvalue
template<typename TargetType, typename ArgType, typename Device>
struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
{
typedef TensorConversionOp<TargetType, ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
typedef TargetType Scalar;
typedef TargetType CoeffReturnType;
typedef typename internal::remove_all<typename internal::traits<ArgType>::Scalar>::type SrcType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename PacketType<SrcType, Device>::type PacketSourceType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = true,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device)
{
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data)
{
return ConversionSubExprEval<internal::is_same<TargetType, SrcType>::value, TensorEvaluator<ArgType, Device>, Scalar>::run(m_impl, data);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
{
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
internal::scalar_cast_op<SrcType, TargetType> converter;
return converter(m_impl.coeff(index));
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
const bool Vectorizable = TensorEvaluator<ArgType, Device>::PacketAccess &
internal::type_casting_traits<SrcType, TargetType>::VectorizedCast;
return PacketConv<LoadMode, Vectorizable>::run(m_impl, index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
if (vectorized) {
const double SrcCoeffRatio =
internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
const double TgtCoeffRatio =
internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) +
TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
} else {
return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
}
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
protected:
template <int LoadMode, bool ActuallyVectorize>
struct PacketConv {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
internal::scalar_cast_op<SrcType, TargetType> converter;
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
for (int i = 0; i < PacketSize; ++i) {
values[i] = converter(impl.coeff(index+i));
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
};
template <int LoadMode>
struct PacketConv<LoadMode, true> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
const int SrcCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
const int TgtCoeffRatio = internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
PacketConverter<TensorEvaluator<ArgType, Device>, PacketSourceType, PacketReturnType,
SrcCoeffRatio, TgtCoeffRatio> converter(impl);
return converter.template packet<LoadMode>(index);
}
};
TensorEvaluator<ArgType, Device> m_impl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
| 11,006 | 38.310714 | 141 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
namespace Eigen {
/** \class TensorConvolution
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor convolution class.
*
* Computes a "valid" convolution of an input tensor with a kernel tensor
* along the requested dimensions: each convolved dimension shrinks from
* input_dim to input_dim - kernel_dim + 1.
*/
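// Illustrative usage sketch (added for exposition; dimensions are made up).
// The user-facing entry point is TensorBase::convolve(kernel, dims):
// \code
// Eigen::Tensor<float, 2> input(20, 30);
// Eigen::Tensor<float, 1> kernel(3);
// input.setRandom(); kernel.setRandom();
// Eigen::array<ptrdiff_t, 1> dims({0});  // convolve along dimension 0
// Eigen::Tensor<float, 2> output = input.convolve(kernel, dims);  // 18 x 30
// \endcode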
namespace internal {
template <typename Index, typename InputDims, int NumKernelDims, int Layout>
class IndexMapper {
public:
IndexMapper(const InputDims& input_dims, const array<Index, NumKernelDims>& kernel_dims,
const array<Index, NumKernelDims>& indices) {
array<Index, NumDims> dimensions = input_dims;
for (int i = 0; i < NumKernelDims; ++i) {
const Index index = indices[i];
const Index input_dim = input_dims[index];
const Index kernel_dim = kernel_dims[i];
const Index result_dim = input_dim - kernel_dim + 1;
dimensions[index] = result_dim;
}
array<Index, NumDims> inputStrides;
array<Index, NumDims> outputStrides;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
inputStrides[0] = 1;
outputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
inputStrides[i] = inputStrides[i-1] * input_dims[i-1];
outputStrides[i] = outputStrides[i-1] * dimensions[i-1];
}
} else {
inputStrides[NumDims - 1] = 1;
outputStrides[NumDims - 1] = 1;
for (int i = static_cast<int>(NumDims) - 2; i >= 0; --i) {
inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1];
}
}
array<Index, NumDims> cudaInputDimensions;
array<Index, NumDims> cudaOutputDimensions;
array<Index, NumDims> tmp = dimensions;
array<Index, NumDims> ordering;
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
for (int i = 0; i < NumKernelDims; ++i) {
const Index index = i + offset;
ordering[index] = indices[i];
tmp[indices[i]] = -1;
cudaInputDimensions[index] = input_dims[indices[i]];
cudaOutputDimensions[index] = dimensions[indices[i]];
}
int written = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? NumKernelDims
: 0;
for (int i = 0; i < NumDims; ++i) {
if (tmp[i] >= 0) {
ordering[written] = i;
cudaInputDimensions[written] = input_dims[i];
cudaOutputDimensions[written] = dimensions[i];
++written;
}
}
for (int i = 0; i < NumDims; ++i) {
m_inputStrides[i] = inputStrides[ordering[i]];
m_outputStrides[i] = outputStrides[ordering[i]];
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = 0; i < NumDims; ++i) {
if (i > NumKernelDims) {
m_cudaInputStrides[i] =
m_cudaInputStrides[i - 1] * cudaInputDimensions[i - 1];
m_cudaOutputStrides[i] =
m_cudaOutputStrides[i - 1] * cudaOutputDimensions[i - 1];
} else {
m_cudaInputStrides[i] = 1;
m_cudaOutputStrides[i] = 1;
}
}
} else {
for (int i = NumDims - 1; i >= 0; --i) {
if (i + 1 < offset) {
m_cudaInputStrides[i] =
m_cudaInputStrides[i + 1] * cudaInputDimensions[i + 1];
m_cudaOutputStrides[i] =
m_cudaOutputStrides[i + 1] * cudaOutputDimensions[i + 1];
} else {
m_cudaInputStrides[i] = 1;
m_cudaOutputStrides[i] = 1;
}
}
}
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputPlaneToTensorInputOffset(Index p) const {
Index inputIndex = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int d = NumDims - 1; d > NumKernelDims; --d) {
const Index idx = p / m_cudaInputStrides[d];
inputIndex += idx * m_inputStrides[d];
p -= idx * m_cudaInputStrides[d];
}
inputIndex += p * m_inputStrides[NumKernelDims];
} else {
std::ptrdiff_t limit = 0;
if (NumKernelDims < NumDims) {
limit = NumDims - NumKernelDims - 1;
}
for (int d = 0; d < limit; ++d) {
const Index idx = p / m_cudaInputStrides[d];
inputIndex += idx * m_inputStrides[d];
p -= idx * m_cudaInputStrides[d];
}
inputIndex += p * m_inputStrides[limit];
}
return inputIndex;
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputPlaneToTensorOutputOffset(Index p) const {
Index outputIndex = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int d = NumDims - 1; d > NumKernelDims; --d) {
const Index idx = p / m_cudaOutputStrides[d];
outputIndex += idx * m_outputStrides[d];
p -= idx * m_cudaOutputStrides[d];
}
outputIndex += p * m_outputStrides[NumKernelDims];
} else {
std::ptrdiff_t limit = 0;
if (NumKernelDims < NumDims) {
limit = NumDims - NumKernelDims - 1;
}
for (int d = 0; d < limit; ++d) {
const Index idx = p / m_cudaOutputStrides[d];
outputIndex += idx * m_outputStrides[d];
p -= idx * m_cudaOutputStrides[d];
}
outputIndex += p * m_outputStrides[limit];
}
return outputIndex;
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_inputStrides[offset];
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_outputStrides[offset];
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1];
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1];
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j, Index k) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1] +
k * m_inputStrides[offset + 2];
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const {
const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: NumDims - NumKernelDims;
return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1] +
k * m_outputStrides[offset + 2];
}
private:
static const int NumDims = internal::array_size<InputDims>::value;
array<Index, NumDims> m_inputStrides;
array<Index, NumDims> m_outputStrides;
array<Index, NumDims> m_cudaInputStrides;
array<Index, NumDims> m_cudaOutputStrides;
};
template<typename Dimensions, typename InputXprType, typename KernelXprType>
struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename promote_storage_type<typename InputXprType::Scalar,
typename KernelXprType::Scalar>::ret Scalar;
typedef typename promote_storage_type<typename traits<InputXprType>::StorageKind,
typename traits<KernelXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<InputXprType>::Index,
typename traits<KernelXprType>::Index>::type Index;
typedef typename InputXprType::Nested LhsNested;
typedef typename KernelXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<InputXprType>::NumDimensions;
static const int Layout = traits<InputXprType>::Layout;
enum {
Flags = 0
};
};
template<typename Dimensions, typename InputXprType, typename KernelXprType>
struct eval<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>, Eigen::Dense>
{
typedef const TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>& type;
};
template<typename Dimensions, typename InputXprType, typename KernelXprType>
struct nested<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType>, 1, typename eval<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >::type>
{
typedef TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> type;
};
} // end namespace internal
template<typename Indices, typename InputXprType, typename KernelXprType>
class TensorConvolutionOp : public TensorBase<TensorConvolutionOp<Indices, InputXprType, KernelXprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorConvolutionOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::promote_storage_type<typename InputXprType::CoeffReturnType,
typename KernelXprType::CoeffReturnType>::ret CoeffReturnType;
typedef typename Eigen::internal::nested<TensorConvolutionOp>::type Nested;
typedef typename Eigen::internal::traits<TensorConvolutionOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorConvolutionOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConvolutionOp(const InputXprType& input, const KernelXprType& kernel, const Indices& dims)
: m_input_xpr(input), m_kernel_xpr(kernel), m_indices(dims) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Indices& indices() const { return m_indices; }
/** \returns the nested expressions */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<typename InputXprType::Nested>::type&
inputExpression() const { return m_input_xpr; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const typename internal::remove_all<typename KernelXprType::Nested>::type&
kernelExpression() const { return m_kernel_xpr; }
protected:
typename InputXprType::Nested m_input_xpr;
typename KernelXprType::Nested m_kernel_xpr;
const Indices m_indices;
};
template<typename Indices, typename InputArgType, typename KernelArgType, typename Device>
struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelArgType>, Device>
{
typedef TensorConvolutionOp<Indices, InputArgType, KernelArgType> XprType;
static const int NumDims = internal::array_size<typename TensorEvaluator<InputArgType, Device>::Dimensions>::value;
static const int NumKernelDims = internal::array_size<Indices>::value;
typedef typename XprType::Index Index;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = TensorEvaluator<InputArgType, Device>::IsAligned & TensorEvaluator<KernelArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<InputArgType, Device>::PacketAccess & TensorEvaluator<KernelArgType, Device>::PacketAccess,
Layout = TensorEvaluator<InputArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<InputArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<KernelArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
const typename TensorEvaluator<InputArgType, Device>::Dimensions& input_dims = m_inputImpl.dimensions();
const typename TensorEvaluator<KernelArgType, Device>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_inputStride[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_inputStride[i] = m_inputStride[i - 1] * input_dims[i - 1];
}
} else {
m_inputStride[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_inputStride[i] = m_inputStride[i + 1] * input_dims[i + 1];
}
}
m_dimensions = m_inputImpl.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = 0; i < NumKernelDims; ++i) {
const Index index = op.indices()[i];
const Index input_dim = input_dims[index];
const Index kernel_dim = kernel_dims[i];
const Index result_dim = input_dim - kernel_dim + 1;
m_dimensions[index] = result_dim;
if (i > 0) {
m_kernelStride[i] = m_kernelStride[i - 1] * kernel_dims[i - 1];
} else {
m_kernelStride[0] = 1;
}
m_indexStride[i] = m_inputStride[index];
}
m_outputStride[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_outputStride[i] = m_outputStride[i - 1] * m_dimensions[i - 1];
}
} else {
for (int i = NumKernelDims - 1; i >= 0; --i) {
const Index index = op.indices()[i];
const Index input_dim = input_dims[index];
const Index kernel_dim = kernel_dims[i];
const Index result_dim = input_dim - kernel_dim + 1;
m_dimensions[index] = result_dim;
if (i < NumKernelDims - 1) {
m_kernelStride[i] = m_kernelStride[i + 1] * kernel_dims[i + 1];
} else {
m_kernelStride[NumKernelDims - 1] = 1;
}
m_indexStride[i] = m_inputStride[index];
}
m_outputStride[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_outputStride[i] = m_outputStride[i + 1] * m_dimensions[i + 1];
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
m_inputImpl.evalSubExprsIfNeeded(NULL);
preloadKernel();
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_inputImpl.cleanup();
if (m_local_kernel) {
m_device.deallocate((void*)m_kernel);
m_local_kernel = false;
}
m_kernel = NULL;
}
void evalTo(typename XprType::Scalar* buffer) {
evalSubExprsIfNeeded(NULL);
for (int i = 0; i < dimensions().TotalSize(); ++i) {
buffer[i] += coeff(i);
}
cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
CoeffReturnType result = CoeffReturnType(0);
convolve(firstInput(index), 0, NumKernelDims-1, result);
return result;
}
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(const Index index) const
{
Index indices[2] = {index, index+PacketSize-1};
Index startInputs[2] = {0, 0};
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx0 = indices[0] / m_outputStride[i];
const Index idx1 = indices[1] / m_outputStride[i];
startInputs[0] += idx0 * m_inputStride[i];
startInputs[1] += idx1 * m_inputStride[i];
indices[0] -= idx0 * m_outputStride[i];
indices[1] -= idx1 * m_outputStride[i];
}
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx0 = indices[0] / m_outputStride[i];
const Index idx1 = indices[1] / m_outputStride[i];
startInputs[0] += idx0 * m_inputStride[i];
startInputs[1] += idx1 * m_inputStride[i];
indices[0] -= idx0 * m_outputStride[i];
indices[1] -= idx1 * m_outputStride[i];
}
}
startInputs[0] += indices[0];
startInputs[1] += indices[1];
if (startInputs[1]-startInputs[0] == PacketSize-1) {
PacketReturnType result = internal::pset1<PacketReturnType>(0);
convolvePacket(startInputs[0], 0, NumKernelDims-1, result);
return result;
} else {
EIGEN_ALIGN_MAX Scalar data[PacketSize];
data[0] = Scalar(0);
convolve(startInputs[0], 0, NumKernelDims-1, data[0]);
for (int i = 1; i < PacketSize-1; ++i) {
data[i] = Scalar(0);
convolve(firstInput(index+i), 0, NumKernelDims-1, data[i]);
}
data[PacketSize-1] = Scalar(0);
convolve(startInputs[1], 0, NumKernelDims-1, data[PacketSize-1]);
return internal::pload<PacketReturnType>(data);
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double kernel_size = m_kernelImpl.dimensions().TotalSize();
// We ignore the use of fused multiply-add.
const double convolve_compute_cost =
TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
const double firstIndex_compute_cost =
NumDims *
(2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
TensorOpCost::DivCost<Index>());
return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
kernel_size * (m_inputImpl.costPerCoeff(vectorized) +
m_kernelImpl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, convolve_compute_cost, vectorized,
PacketSize));
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
private:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
Index startInput = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStride[i];
startInput += idx * m_inputStride[i];
index -= idx * m_outputStride[i];
}
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStride[i];
startInput += idx * m_inputStride[i];
index -= idx * m_outputStride[i];
}
}
startInput += index;
return startInput;
}
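// Recursively walks the kernel dimensions from DimIndex down to 0,
// accumulating input(firstIndex + offset) * kernel(firstKernel + offset)
// into accum; the DimIndex == 0 level performs the actual multiply-adds.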
EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const {
for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
const Index input = firstIndex + j * m_indexStride[DimIndex];
const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
if (DimIndex > 0) {
convolve(input, kernel, DimIndex-1, accum);
} else {
accum += m_inputImpl.coeff(input) * m_kernel[kernel];
}
}
}
template <typename Packet>
EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const {
for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) {
const Index input = firstIndex + j * m_indexStride[DimIndex];
const Index kernel = firstKernel + j * m_kernelStride[DimIndex];
if (DimIndex > 0) {
convolvePacket(input, kernel, DimIndex-1, accum);
} else {
accum = internal::pmadd<Packet>(m_inputImpl.template packet<Unaligned>(input), internal::pset1<Packet>(m_kernel[kernel]), accum);
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() {
// Don't make a local copy of the kernel unless we have to (i.e. when the
// kernel is an expression that needs to be evaluated)
const Scalar* in_place = m_kernelImpl.data();
if (in_place) {
m_kernel = in_place;
m_local_kernel = false;
} else {
size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
typedef TensorEvalToOp<const KernelArgType> EvalTo;
EvalTo evalToTmp(local, m_kernelArg);
const bool PacketAccess = internal::IsVectorizable<Device, KernelArgType>::value;
internal::TensorExecutor<const EvalTo, Device, PacketAccess>::run(evalToTmp, m_device);
m_kernel = local;
m_local_kernel = true;
}
}
array<Index, NumDims> m_inputStride;
array<Index, NumDims> m_outputStride;
array<Index, NumKernelDims> m_indexStride;
array<Index, NumKernelDims> m_kernelStride;
TensorEvaluator<InputArgType, Device> m_inputImpl;
TensorEvaluator<KernelArgType, Device> m_kernelImpl;
Dimensions m_dimensions;
KernelArgType m_kernelArg;
const Scalar* m_kernel;
bool m_local_kernel;
const Device& m_device;
};
// Use an optimized implementation of the evaluation code for GPUs whenever possible.
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
template <int StaticKernelSize>
struct GetKernelSize {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int /*kernelSize*/) const {
return StaticKernelSize;
}
};
template <>
struct GetKernelSize<Dynamic> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const {
return kernelSize;
}
};
template <typename InputEvaluator, typename Index, typename InputDims,
int StaticKernelSize>
__global__ void EigenConvolutionKernel1D(
InputEvaluator eval,
const internal::IndexMapper<Index, InputDims, 1, InputEvaluator::Layout>
indexMapper,
const float* __restrict kernel, const int numPlanes, const int numX,
const int maxX, const int kernelSize, float* buffer) {
extern __shared__ float s[];
const int first_x = blockIdx.x * maxX;
const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
const int num_x_input = last_x - first_x + GetKernelSize<StaticKernelSize>()(kernelSize);
const int num_x_output = last_x - first_x + 1;
const int first_plane = blockIdx.y * blockDim.y;
const int plane_stride = blockDim.y * gridDim.y;
for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) {
// Load inputs to shared memory
const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = threadIdx.y * num_x_input;
#pragma unroll
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x);
s[i + plane_kernel_offset] = eval.coeff(tensor_index);
}
__syncthreads();
// Compute the convolution
const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
#pragma unroll
for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
const int kernel_offset = plane_kernel_offset + i;
float result = 0.0f;
#pragma unroll
for (int k = 0; k < GetKernelSize<StaticKernelSize>()(kernelSize); ++k) {
result += s[k + kernel_offset] * kernel[k];
}
const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x);
buffer[tensor_index] = result;
}
__syncthreads();
}
}
template <typename InputEvaluator, typename Index, typename InputDims,
int StaticKernelSizeX, int StaticKernelSizeY>
__global__ void EigenConvolutionKernel2D(
InputEvaluator eval,
const internal::IndexMapper<Index, InputDims, 2, InputEvaluator::Layout>
indexMapper,
const float* __restrict kernel, const int numPlanes, const int numX,
const int maxX, const int numY, const int maxY, const int kernelSizeX,
const int kernelSizeY, float* buffer) {
extern __shared__ float s[];
const int first_x = blockIdx.x * maxX;
const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
const int num_x_input = last_x - first_x + GetKernelSize<StaticKernelSizeX>()(kernelSizeX);
const int num_x_output = last_x - first_x + 1;
const int first_y = blockIdx.y * maxY;
const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1;
const int num_y_input = last_y - first_y + GetKernelSize<StaticKernelSizeY>()(kernelSizeY);
const int num_y_output = last_y - first_y + 1;
const int first_plane = blockIdx.z * blockDim.z;
const int plane_stride = blockDim.z * gridDim.z;
for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) {
const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = threadIdx.z * num_y_input;
// Load inputs to shared memory
#pragma unroll
for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
const int input_offset = num_x_input * (j + plane_kernel_offset);
#pragma unroll
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y);
s[i + input_offset] = eval.coeff(tensor_index);
}
}
__syncthreads();
// Convolution
const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
#pragma unroll
for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
#pragma unroll
for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
float result = 0.0f;
#pragma unroll
for (int l = 0; l < GetKernelSize<StaticKernelSizeY>()(kernelSizeY); ++l) {
const int kernel_offset = kernelSizeX * l;
const int input_offset = i + num_x_input * (j + l + plane_kernel_offset);
#pragma unroll
for (int k = 0; k < GetKernelSize<StaticKernelSizeX>()(kernelSizeX); ++k) {
result += s[k + input_offset] * kernel[k + kernel_offset];
}
}
const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y);
buffer[tensor_index] = result;
}
}
__syncthreads();
}
}
template <typename InputEvaluator, typename Index, typename InputDims>
__global__ void EigenConvolutionKernel3D(
InputEvaluator eval,
const internal::IndexMapper<Index, InputDims, 3, InputEvaluator::Layout>
indexMapper,
const float* __restrict kernel, const size_t numPlanes, const size_t numX,
const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ,
const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY,
const size_t kernelSizeZ, float* buffer) {
extern __shared__ float s[];
// Load inputs to shared memory
const int first_x = blockIdx.x * maxX;
const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1;
const int num_x_input = last_x - first_x + kernelSizeX;
const int first_y = blockIdx.y * maxY;
const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1;
const int num_y_input = last_y - first_y + kernelSizeY;
const int first_z = blockIdx.z * maxZ;
const int last_z = (first_z + maxZ < numZ ? first_z + maxZ : numZ) - 1;
const int num_z_input = last_z - first_z + kernelSizeZ;
for (int p = 0; p < numPlanes; ++p) {
const int plane_input_offset = indexMapper.mapCudaInputPlaneToTensorInputOffset(p);
const int plane_kernel_offset = 0;
for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) {
for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
const int tensor_index = plane_input_offset + indexMapper.mapCudaInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z);
s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index);
}
}
}
__syncthreads();
// Convolution
const int num_z_output = last_z - first_z + 1;
const int num_y_output = last_y - first_y + 1;
const int num_x_output = last_x - first_x + 1;
const int plane_output_offset = indexMapper.mapCudaOutputPlaneToTensorOutputOffset(p);
for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) {
for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
float result = 0.0f;
for (int n = 0; n < kernelSizeZ; ++n) {
for (int m = 0; m < kernelSizeY; ++m) {
for (int l = 0; l < kernelSizeX; ++l) {
result += s[i + l + num_x_input * (j + m + num_y_input * (k + n + plane_kernel_offset))] * kernel[l + kernelSizeX * (m + kernelSizeY * n)];
}
}
}
const int tensor_index = plane_output_offset + indexMapper.mapCudaOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z);
buffer[tensor_index] = result;
}
}
}
__syncthreads();
}
}
template<typename Indices, typename InputArgType, typename KernelArgType>
struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelArgType>, GpuDevice>
{
typedef TensorConvolutionOp<Indices, InputArgType, KernelArgType> XprType;
static const int NumDims = internal::array_size<typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions>::value;
static const int NumKernelDims = internal::array_size<Indices>::value;
typedef typename XprType::Index Index;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions KernelDimensions;
enum {
IsAligned = TensorEvaluator<InputArgType, GpuDevice>::IsAligned & TensorEvaluator<KernelArgType, GpuDevice>::IsAligned,
PacketAccess = false,
Layout = TensorEvaluator<InputArgType, GpuDevice>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device)
: m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<InputArgType, GpuDevice>::Layout) == static_cast<int>(TensorEvaluator<KernelArgType, GpuDevice>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
const typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions& input_dims = m_inputImpl.dimensions();
const typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
m_dimensions = m_inputImpl.dimensions();
for (int i = 0; i < NumKernelDims; ++i) {
const Index index = op.indices()[i];
const Index input_dim = input_dims[index];
const Index kernel_dim = kernel_dims[i];
const Index result_dim = input_dim - kernel_dim + 1;
m_dimensions[index] = result_dim;
}
}
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, GpuDevice>::type PacketReturnType;
typedef typename InputArgType::Scalar Scalar;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
preloadKernel();
m_inputImpl.evalSubExprsIfNeeded(NULL);
if (data) {
executeEval(data);
return false;
} else {
m_buf = (Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar));
executeEval(m_buf);
return true;
}
}
EIGEN_STRONG_INLINE void cleanup() {
m_inputImpl.cleanup();
if (m_buf) {
m_device.deallocate(m_buf);
m_buf = NULL;
}
if (m_local_kernel) {
m_device.deallocate((void*)m_kernel);
m_local_kernel = false;
}
m_kernel = NULL;
}
EIGEN_STRONG_INLINE void preloadKernel() {
// Don't make a local copy of the kernel unless we have to (i.e. when the
// kernel is an expression that needs to be evaluated)
const Scalar* in_place = m_kernelImpl.data();
if (in_place) {
m_kernel = in_place;
m_local_kernel = false;
} else {
size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar);
Scalar* local = (Scalar*)m_device.allocate(kernel_sz);
typedef TensorEvalToOp<const KernelArgType> EvalTo;
EvalTo evalToTmp(local, m_kernelArg);
const bool PacketAccess = internal::IsVectorizable<GpuDevice, KernelArgType>::value;
internal::TensorExecutor<const EvalTo, GpuDevice, PacketAccess>::run(evalToTmp, m_device);
m_kernel = local;
m_local_kernel = true;
}
}
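// Integer ceiling division helper, e.g. ceil(10, 4) == 3.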
static unsigned int ceil(unsigned int num, unsigned int denom) {
const unsigned int rounded_toward_zero = num / denom;
if (num > rounded_toward_zero * denom) {
return rounded_toward_zero + 1;
}
return rounded_toward_zero;
}
void executeEval(Scalar* data) const {
typedef typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions InputDims;
const int maxSharedMem = m_device.sharedMemPerBlock();
const int maxThreadsPerBlock = m_device.maxCudaThreadsPerBlock();
const int maxBlocksPerProcessor = m_device.maxCudaThreadsPerMultiProcessor() / maxThreadsPerBlock;
const int numMultiProcessors = m_device.getNumCudaMultiProcessors();
const int warpSize = 32;
switch (NumKernelDims) {
case 1: {
const int kernel_size = m_kernelImpl.dimensions().TotalSize();
const int numX = dimensions()[m_indices[0]];
const int numP = dimensions().TotalSize() / numX;
int maxX;
dim3 block_size;
const int single_stride_dim =
static_cast<int>(Layout) == static_cast<int>(ColMajor)
? 0
: m_inputImpl.dimensions().rank() - 1;
if (m_indices[0] == single_stride_dim) {
// Maximize the reuse
const int inner_dim = ((maxSharedMem / (sizeof(Scalar)) - kernel_size + 1 + 31) / 32) * 32;
maxX = numext::mini<int>(inner_dim, numX);
const int maxP = numext::mini<int>(maxSharedMem / ((kernel_size - 1 + maxX) * sizeof(Scalar)), numP);
block_size.x = numext::mini(maxThreadsPerBlock, maxX);
block_size.y = numext::mini<int>(maxThreadsPerBlock / block_size.x, maxP);
}
else {
// Read as much as possible along the innermost dimension, that is, the plane
const int inner_dim = maxSharedMem / ((warpSize + kernel_size) * sizeof(Scalar));
const int maxP = numext::mini<int>(inner_dim, numP);
maxX = numext::mini<int>(maxSharedMem / (inner_dim * sizeof(Scalar)) - kernel_size + 1, numX);
block_size.x = numext::mini(warpSize, maxX);
block_size.y = numext::mini<int>(maxThreadsPerBlock/block_size.x, maxP);
}
const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar);
assert(shared_mem <= maxSharedMem);
const int num_x_blocks = ceil(numX, maxX);
const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
const int num_y_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks);
dim3 num_blocks(num_x_blocks, numext::mini<int>(num_y_blocks, ceil(numP, block_size.y)));
const array<Index, 1> indices(m_indices[0]);
const array<Index, 1> kernel_dims(m_kernelImpl.dimensions()[0]);
internal::IndexMapper<Index, InputDims, 1, Layout> indexMapper(
m_inputImpl.dimensions(), kernel_dims, indices);
switch(kernel_size) {
case 4: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
break;
}
case 7: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
break;
}
default: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
}
}
break;
}
case 2: {
const int idxX =
static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : 1;
const int idxY =
static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 1 : 0;
const int kernel_size_x = m_kernelImpl.dimensions()[idxX];
const int kernel_size_y = m_kernelImpl.dimensions()[idxY];
const int numX = dimensions()[m_indices[idxX]];
const int numY = dimensions()[m_indices[idxY]];
const int numP = dimensions().TotalSize() / (numX*numY);
const float scaling_factor = sqrtf(static_cast<float>(maxSharedMem) / (sizeof(Scalar) * kernel_size_y * kernel_size_x));
// Snap maxX to warp size
int inner_dim = ((static_cast<int>(scaling_factor * kernel_size_x) - kernel_size_x + 1 + 32) / 32) * 32;
const int maxX = numext::mini<int>(inner_dim, numX);
const int maxY = numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1)) - kernel_size_y + 1, numY);
const int maxP = numext::mini<int>(maxSharedMem / ((kernel_size_x - 1 + maxX) * (kernel_size_y - 1 + maxY) * sizeof(Scalar)), numP);
dim3 block_size;
block_size.x = numext::mini(1024, maxX);
block_size.y = numext::mini<int>(1024/block_size.x, maxY);
block_size.z = numext::mini<int>(1024/(block_size.x*block_size.y), maxP);
const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar);
assert(shared_mem <= maxSharedMem);
const int num_x_blocks = ceil(numX, maxX);
const int num_y_blocks = ceil(numY, maxY);
const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem);
const int num_z_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks * num_y_blocks);
dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini<int>(num_z_blocks, ceil(numP, block_size.z)));
const array<Index, 2> indices(m_indices[idxX], m_indices[idxY]);
const array<Index, 2> kernel_dims(m_kernelImpl.dimensions()[idxX],
m_kernelImpl.dimensions()[idxY]);
internal::IndexMapper<Index, InputDims, 2, Layout> indexMapper(
m_inputImpl.dimensions(), kernel_dims, indices);
switch (kernel_size_x) {
case 4: {
switch (kernel_size_y) {
case 7: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data);
break;
}
default: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data);
break;
}
}
break;
}
case 7: {
switch (kernel_size_y) {
case 4: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data);
break;
}
default: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data);
break;
}
}
break;
}
default: {
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data);
break;
}
}
break;
}
case 3: {
const int idxX =
static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : 2;
const int idxY =
static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 1 : 1;
const int idxZ =
static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 2 : 0;
const int kernel_size_x = m_kernelImpl.dimensions()[idxX];
const int kernel_size_y = m_kernelImpl.dimensions()[idxY];
const int kernel_size_z = m_kernelImpl.dimensions()[idxZ];
const int numX = dimensions()[m_indices[idxX]];
const int numY = dimensions()[m_indices[idxY]];
const int numZ = dimensions()[m_indices[idxZ]];
const int numP = dimensions().TotalSize() / (numX*numY*numZ);
const int maxX = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * kernel_size_y * kernel_size_z) - kernel_size_x + 1, numX));
const int maxY = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * kernel_size_z) - kernel_size_y + 1, numY));
const int maxZ = numext::mini<int>(128, numext::mini<int>(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1)) - kernel_size_z + 1, numZ));
dim3 block_size;
block_size.x = numext::mini(32, maxX);
block_size.y = numext::mini(32, maxY);
block_size.z = numext::mini<int>(1024/(block_size.x*block_size.y), maxZ);
dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ));
const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar);
assert(shared_mem <= maxSharedMem);
const array<Index, 3> indices(m_indices[idxX], m_indices[idxY],
m_indices[idxZ]);
const array<Index, 3> kernel_dims(m_kernelImpl.dimensions()[idxX],
m_kernelImpl.dimensions()[idxY],
m_kernelImpl.dimensions()[idxZ]);
internal::IndexMapper<Index, InputDims, 3, Layout> indexMapper(
m_inputImpl.dimensions(), kernel_dims, indices);
LAUNCH_CUDA_KERNEL((EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data);
break;
}
default: {
EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE);
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
eigen_assert(m_buf);
eigen_assert(index < m_dimensions.TotalSize());
return m_buf[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(const Index index) const
{
eigen_assert(m_buf);
eigen_assert(index < m_dimensions.TotalSize());
return internal::ploadt<PacketReturnType, LoadMode>(m_buf+index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
// TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost
// model.
const double kernel_size = m_kernelImpl.dimensions().TotalSize();
// We ignore the use of fused multiply-add.
const double convolve_compute_cost =
TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>();
const double firstIndex_compute_cost =
NumDims *
(2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
TensorOpCost::DivCost<Index>());
return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) +
kernel_size * (m_inputImpl.costPerCoeff(vectorized) +
m_kernelImpl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, convolve_compute_cost, vectorized,
PacketSize));
}
private:
// No assignment (copies are needed by the kernels)
TensorEvaluator& operator = (const TensorEvaluator&);
TensorEvaluator<InputArgType, GpuDevice> m_inputImpl;
TensorEvaluator<KernelArgType, GpuDevice> m_kernelImpl;
KernelArgType m_kernelArg;
Indices m_indices;
Dimensions m_dimensions;
Scalar* m_buf;
const Scalar* m_kernel;
bool m_local_kernel;
const GpuDevice& m_device;
};
#endif
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
| 47,585 | 42.064253 | 404 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
namespace Eigen {
/** \class TensorOpCost
  * \ingroup CXX11_Tensor_Module
  *
  * \brief A cost model used to limit the number of threads used for evaluating
  * tensor expressions.
  *
  */
// Class storing the cost of evaluating a tensor expression in terms of the
// estimated number of operand bytes loaded, bytes stored, and compute cycles.
class TensorOpCost {
public:
// TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple
// model based on minimal reciprocal throughput numbers from Intel or
// Agner Fog's tables would be better than what is there now.
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() {
return internal::functor_traits<
internal::scalar_product_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() {
return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
}
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() {
return internal::functor_traits<
internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() {
return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
}
template <typename SrcType, typename TargetType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() {
return internal::functor_traits<
internal::scalar_cast_op<SrcType, TargetType> >::Cost;
}
EIGEN_DEVICE_FUNC
TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
EIGEN_DEVICE_FUNC
TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles)
: bytes_loaded_(bytes_loaded),
bytes_stored_(bytes_stored),
compute_cycles_(compute_cycles) {}
EIGEN_DEVICE_FUNC
TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles,
bool vectorized, double packet_size)
: bytes_loaded_(bytes_loaded),
bytes_stored_(bytes_stored),
compute_cycles_(vectorized ? compute_cycles / packet_size
: compute_cycles) {
eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded));
eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored));
eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const {
return bytes_loaded_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const {
return bytes_stored_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const {
return compute_cycles_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost(
double load_cost, double store_cost, double compute_cost) const {
return load_cost * bytes_loaded_ + store_cost * bytes_stored_ +
compute_cost * compute_cycles_;
}
// Drop memory access component. Intended for cases when memory accesses are
// sequential or are completely masked by computations.
EIGEN_DEVICE_FUNC void dropMemoryCost() {
bytes_loaded_ = 0;
bytes_stored_ = 0;
}
// TODO(rmlarsen): Define min in terms of total cost, not elementwise.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin(
const TensorOpCost& rhs) const {
double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded());
double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored());
double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles());
return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
}
// TODO(rmlarsen): Define max in terms of total cost, not elementwise.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax(
const TensorOpCost& rhs) const {
double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded());
double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored());
double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles());
return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=(
const TensorOpCost& rhs) {
bytes_loaded_ += rhs.bytes_loaded();
bytes_stored_ += rhs.bytes_stored();
compute_cycles_ += rhs.compute_cycles();
return *this;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) {
bytes_loaded_ *= rhs;
bytes_stored_ *= rhs;
compute_cycles_ *= rhs;
return *this;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+(
TensorOpCost lhs, const TensorOpCost& rhs) {
lhs += rhs;
return lhs;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
TensorOpCost lhs, double rhs) {
lhs *= rhs;
return lhs;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*(
double lhs, TensorOpCost rhs) {
rhs *= lhs;
return rhs;
}
friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) {
return os << "[bytes_loaded = " << tc.bytes_loaded()
<< ", bytes_stored = " << tc.bytes_stored()
<< ", compute_cycles = " << tc.compute_cycles() << "]";
}
private:
double bytes_loaded_;
double bytes_stored_;
double compute_cycles_;
};
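// Usage sketch (illustrative, not part of this header's API): constructing and
// combining per-coefficient cost estimates. The numbers below are made up.
//   Eigen::TensorOpCost c(/*bytes_loaded=*/8, /*bytes_stored=*/4,
//                         /*compute_cycles=*/2);
//   Eigen::TensorOpCost total = 2.0 * c + c;   // scale, then accumulate
//   double cycles = total.total_cost(/*load_cost=*/0.17, /*store_cost=*/0.17,
//                                    /*compute_cost=*/1.0);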
// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of threads
// in [1:max_threads] instead of just switching multi-threading off for small
// work units.
template <typename Device>
class TensorCostModel {
public:
// Scaling from Eigen compute cost to device cycles.
static const int kDeviceCyclesPerComputeCycle = 1;
// Costs in device cycles.
static const int kStartupCycles = 100000;
static const int kPerThreadCycles = 100000;
static const int kTaskSize = 40000;
// Returns the number of threads in [1:max_threads] to use for
// evaluating an expression with the given output size and cost per
// coefficient.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads(
double output_size, const TensorOpCost& cost_per_coeff, int max_threads) {
double cost = totalCost(output_size, cost_per_coeff);
    // The +0.9 before truncation effectively rounds up once the fractional
    // part exceeds 0.1; the result is then clamped to [1, max_threads].
    int threads = static_cast<int>((cost - kStartupCycles) / kPerThreadCycles + 0.9);
return numext::mini(max_threads, numext::maxi(1, threads));
}
  // taskSize assesses the parallel task size.
  // A value of 1.0 means an ideal parallel task size. Values < 1.0 mean that
  // task granularity needs to be increased to mitigate parallelization
  // overheads.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize(
double output_size, const TensorOpCost& cost_per_coeff) {
return totalCost(output_size, cost_per_coeff) / kTaskSize;
}
private:
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost(
double output_size, const TensorOpCost& cost_per_coeff) {
// Cost of memory fetches from L2 cache. 64 is typical cache line size.
// 11 is L2 cache latency on Haswell.
// We don't know whether data is in L1, L2 or L3. But we are most interested
// in single-threaded computational time around 100us-10ms (smaller time
    // is too small for parallelization, larger time is not interesting
// either because we are probably using all available threads already).
// And for the target time range, L2 seems to be what matters. Data set
// fitting into L1 is too small to take noticeable time. Data set fitting
// only into L3 presumably will take more than 10ms to load and process.
const double kLoadCycles = 1.0 / 64 * 11;
const double kStoreCycles = 1.0 / 64 * 11;
// Scaling from Eigen compute cost to device cycles.
return output_size *
cost_per_coeff.total_cost(kLoadCycles, kStoreCycles,
kDeviceCyclesPerComputeCycle);
}
};
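// Usage sketch (illustrative): querying the cost model for a thread count.
// The ThreadPoolDevice specialization is assumed to be available
// (EIGEN_USE_THREADS).
//   Eigen::TensorOpCost per_coeff(/*bytes_loaded=*/8, /*bytes_stored=*/4,
//                                 /*compute_cycles=*/2);
//   int threads = Eigen::TensorCostModel<Eigen::ThreadPoolDevice>::numThreads(
//       /*output_size=*/1e6, per_coeff, /*max_threads=*/16);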
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
| 8,443 | 38.643192 | 80 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
#define EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
namespace Eigen {
/** \class TensorCustomUnaryOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor expression that applies a user-defined unary operation,
  * supplied as a functor, to its input expression.
  *
  */
namespace internal {
template<typename CustomUnaryFunc, typename XprType>
struct traits<TensorCustomUnaryOp<CustomUnaryFunc, XprType> >
{
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageKind StorageKind;
typedef typename XprType::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = traits<XprType>::NumDimensions;
static const int Layout = traits<XprType>::Layout;
};
template<typename CustomUnaryFunc, typename XprType>
struct eval<TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Eigen::Dense>
{
typedef const TensorCustomUnaryOp<CustomUnaryFunc, XprType>& type;
};
template<typename CustomUnaryFunc, typename XprType>
struct nested<TensorCustomUnaryOp<CustomUnaryFunc, XprType> >
{
typedef TensorCustomUnaryOp<CustomUnaryFunc, XprType> type;
};
} // end namespace internal
template<typename CustomUnaryFunc, typename XprType>
class TensorCustomUnaryOp : public TensorBase<TensorCustomUnaryOp<CustomUnaryFunc, XprType>, ReadOnlyAccessors>
{
public:
typedef typename internal::traits<TensorCustomUnaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename internal::nested<TensorCustomUnaryOp>::type Nested;
typedef typename internal::traits<TensorCustomUnaryOp>::StorageKind StorageKind;
typedef typename internal::traits<TensorCustomUnaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomUnaryOp(const XprType& expr, const CustomUnaryFunc& func)
: m_expr(expr), m_func(func) {}
EIGEN_DEVICE_FUNC
const CustomUnaryFunc& func() const { return m_func; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_expr; }
protected:
typename XprType::Nested m_expr;
const CustomUnaryFunc m_func;
};
// Eval as rvalue
template<typename CustomUnaryFunc, typename XprType, typename Device>
struct TensorEvaluator<const TensorCustomUnaryOp<CustomUnaryFunc, XprType>, Device>
{
typedef TensorCustomUnaryOp<CustomUnaryFunc, XprType> ArgType;
typedef typename internal::traits<ArgType>::Index Index;
static const int NumDims = internal::traits<ArgType>::NumDimensions;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename internal::remove_const<typename ArgType::Scalar>::type Scalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = (internal::packet_traits<Scalar>::size > 1),
BlockAccess = false,
Layout = TensorEvaluator<XprType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const ArgType& op, const Device& device)
: m_op(op), m_device(device), m_result(NULL)
{
m_dimensions = op.func().dimensions(op.expression());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
if (data) {
evalTo(data);
return false;
} else {
m_result = static_cast<CoeffReturnType*>(
m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
evalTo(m_result);
return true;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
if (m_result != NULL) {
m_device.deallocate(m_result);
m_result = NULL;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
return m_result[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
// TODO(rmlarsen): Extend CustomOp API to return its cost estimate.
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
protected:
EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
TensorMap<Tensor<CoeffReturnType, NumDims, Layout, Index> > result(
data, m_dimensions);
m_op.func().eval(m_op.expression(), result, m_device);
}
Dimensions m_dimensions;
const ArgType m_op;
const Device& m_device;
CoeffReturnType* m_result;
};
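// Usage sketch (illustrative): the functor contract implied by the evaluator
// above is dimensions(input) -> output shape, and eval(input, output, device).
// The customOp() entry point on TensorBase is assumed here, and Input is
// assumed to be a concrete tensor so that dimensions() is available on it.
//   struct DoubleFirstDim {
//     template <typename Input>
//     Eigen::DSizes<Eigen::DenseIndex, 2> dimensions(const Input& in) const {
//       Eigen::DSizes<Eigen::DenseIndex, 2> out;
//       out[0] = 2 * in.dimensions()[0];
//       out[1] = in.dimensions()[1];
//       return out;
//     }
//     template <typename Input, typename Output, typename Device>
//     void eval(const Input& in, Output& out, const Device& dev) const {
//       out.device(dev) = in.concatenate(in, 0);  // any tensor expression
//     }
//   };
//   // Eigen::Tensor<float, 2> t(3, 4);
//   // auto result = t.customOp(DoubleFirstDim());  // 6 x 4 output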
/** \class TensorCustomBinaryOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor expression that applies a user-defined binary operation to
  * two input expressions.
  *
  */
namespace internal {
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
struct traits<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> >
{
typedef typename internal::promote_storage_type<typename LhsXprType::Scalar,
typename RhsXprType::Scalar>::ret Scalar;
typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
typename traits<RhsXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<LhsXprType>::Index,
typename traits<RhsXprType>::Index>::type Index;
typedef typename LhsXprType::Nested LhsNested;
typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<LhsXprType>::NumDimensions;
static const int Layout = traits<LhsXprType>::Layout;
};
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
struct eval<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Eigen::Dense>
{
typedef const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>& type;
};
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
struct nested<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> >
{
typedef TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> type;
};
} // end namespace internal
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
class TensorCustomBinaryOp : public TensorBase<TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, ReadOnlyAccessors>
{
public:
typedef typename internal::traits<TensorCustomBinaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::traits<TensorCustomBinaryOp>::CoeffReturnType CoeffReturnType;
typedef typename internal::nested<TensorCustomBinaryOp>::type Nested;
typedef typename internal::traits<TensorCustomBinaryOp>::StorageKind StorageKind;
typedef typename internal::traits<TensorCustomBinaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const CustomBinaryFunc& func)
: m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_func(func) {}
EIGEN_DEVICE_FUNC
const CustomBinaryFunc& func() const { return m_func; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename LhsXprType::Nested>::type&
lhsExpression() const { return m_lhs_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
protected:
typename LhsXprType::Nested m_lhs_xpr;
typename RhsXprType::Nested m_rhs_xpr;
const CustomBinaryFunc m_func;
};
// Eval as rvalue
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType, typename Device>
struct TensorEvaluator<const TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType>, Device>
{
typedef TensorCustomBinaryOp<CustomBinaryFunc, LhsXprType, RhsXprType> XprType;
typedef typename internal::traits<XprType>::Index Index;
static const int NumDims = internal::traits<XprType>::NumDimensions;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = (internal::packet_traits<Scalar>::size > 1),
BlockAccess = false,
Layout = TensorEvaluator<LhsXprType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_op(op), m_device(device), m_result(NULL)
{
m_dimensions = op.func().dimensions(op.lhsExpression(), op.rhsExpression());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
if (data) {
evalTo(data);
return false;
} else {
m_result = static_cast<Scalar *>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
evalTo(m_result);
return true;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
if (m_result != NULL) {
m_device.deallocate(m_result);
m_result = NULL;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
return m_result[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
// TODO(rmlarsen): Extend CustomOp API to return its cost estimate.
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_result; }
protected:
EIGEN_DEVICE_FUNC void evalTo(Scalar* data) {
TensorMap<Tensor<Scalar, NumDims, Layout> > result(data, m_dimensions);
m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device);
}
Dimensions m_dimensions;
const XprType m_op;
const Device& m_device;
CoeffReturnType* m_result;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H
| 11,445 | 35.452229 | 136 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
namespace Eigen {
/** \class TensorDevice
* \ingroup CXX11_Tensor_Module
*
* \brief Pseudo expression providing an operator = that will evaluate its argument
* on the specified computing 'device' (GPU, thread pool, ...)
*
* Example:
* C.device(EIGEN_GPU) = A + B;
*
* Todo: operator *= and /=.
*/
template <typename ExpressionType, typename DeviceType> class TensorDevice {
public:
TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {}
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
Assign assign(m_expression, other);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) {
typedef typename OtherDerived::Scalar Scalar;
typedef TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ExpressionType, const OtherDerived> Sum;
Sum sum(m_expression, other);
typedef TensorAssignOp<ExpressionType, const Sum> Assign;
Assign assign(m_expression, sum);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) {
typedef typename OtherDerived::Scalar Scalar;
typedef TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const ExpressionType, const OtherDerived> Difference;
Difference difference(m_expression, other);
typedef TensorAssignOp<ExpressionType, const Difference> Assign;
Assign assign(m_expression, difference);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
protected:
const DeviceType& m_device;
ExpressionType& m_expression;
};
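// Usage sketch (illustrative; requires EIGEN_USE_THREADS for ThreadPoolDevice):
//   Eigen::ThreadPool pool(4);
//   Eigen::ThreadPoolDevice device(&pool, /*num_cores=*/4);
//   Eigen::Tensor<float, 2> A(64, 64), B(64, 64), C(64, 64);
//   A.setRandom(); B.setRandom();
//   C.device(device) = A + B;   // operator= runs through TensorExecutor
//   C.device(device) += A;      // operator+= builds a cwise sum, then assigns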
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
| 2,570 | 36.26087 | 127 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
namespace Eigen {
static const int kCudaScratchSize = 1024;
// This defines an interface that GPUDevice can take to use
// CUDA streams underneath.
class StreamInterface {
public:
virtual ~StreamInterface() {}
virtual const cudaStream_t& stream() const = 0;
virtual const cudaDeviceProp& deviceProperties() const = 0;
// Allocate memory on the actual device where the computation will run
virtual void* allocate(size_t num_bytes) const = 0;
virtual void deallocate(void* buffer) const = 0;
// Return a scratchpad buffer of size 1k
virtual void* scratchpad() const = 0;
// Return a semaphore. The semaphore is initially initialized to 0, and
// each kernel using it is responsible for resetting to 0 upon completion
// to maintain the invariant that the semaphore is always equal to 0 upon
// each kernel start.
virtual unsigned int* semaphore() const = 0;
};
static cudaDeviceProp* m_deviceProperties;
static bool m_devicePropInitialized = false;
static void initializeDeviceProp() {
if (!m_devicePropInitialized) {
// Attempts to ensure proper behavior in the case of multiple threads
// calling this function simultaneously. This would be trivial to
// implement if we could use std::mutex, but unfortunately mutex don't
// compile with nvcc, so we resort to atomics and thread fences instead.
// Note that if the caller uses a compiler that doesn't support c++11 we
// can't ensure that the initialization is thread safe.
#if __cplusplus >= 201103L
static std::atomic<bool> first(true);
if (first.exchange(false)) {
#else
static bool first = true;
if (first) {
first = false;
#endif
// We're the first thread to reach this point.
int num_devices;
cudaError_t status = cudaGetDeviceCount(&num_devices);
if (status != cudaSuccess) {
std::cerr << "Failed to get the number of CUDA devices: "
<< cudaGetErrorString(status)
<< std::endl;
assert(status == cudaSuccess);
}
m_deviceProperties = new cudaDeviceProp[num_devices];
for (int i = 0; i < num_devices; ++i) {
status = cudaGetDeviceProperties(&m_deviceProperties[i], i);
if (status != cudaSuccess) {
std::cerr << "Failed to initialize CUDA device #"
<< i
<< ": "
<< cudaGetErrorString(status)
<< std::endl;
assert(status == cudaSuccess);
}
}
#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_release);
#endif
m_devicePropInitialized = true;
} else {
      // Wait for the other thread to initialize the properties.
while (!m_devicePropInitialized) {
#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_acquire);
#endif
sleep(1);
}
}
}
}
static const cudaStream_t default_stream = cudaStreamDefault;
class CudaStreamDevice : public StreamInterface {
public:
// Use the default stream on the current device
CudaStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
cudaGetDevice(&device_);
initializeDeviceProp();
}
// Use the default stream on the specified device
CudaStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {
initializeDeviceProp();
}
  // Use the specified stream. Note that it's the caller's responsibility
  // to ensure that the stream can run on the specified device. If no device
  // is specified, the code assumes that the stream is associated with the
  // current gpu device.
CudaStreamDevice(const cudaStream_t* stream, int device = -1)
: stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) {
if (device < 0) {
cudaGetDevice(&device_);
} else {
int num_devices;
cudaError_t err = cudaGetDeviceCount(&num_devices);
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
assert(device < num_devices);
device_ = device;
}
initializeDeviceProp();
}
virtual ~CudaStreamDevice() {
if (scratch_) {
deallocate(scratch_);
}
}
const cudaStream_t& stream() const { return *stream_; }
const cudaDeviceProp& deviceProperties() const {
return m_deviceProperties[device_];
}
virtual void* allocate(size_t num_bytes) const {
cudaError_t err = cudaSetDevice(device_);
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
void* result;
err = cudaMalloc(&result, num_bytes);
assert(err == cudaSuccess);
assert(result != NULL);
return result;
}
virtual void deallocate(void* buffer) const {
cudaError_t err = cudaSetDevice(device_);
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
assert(buffer != NULL);
err = cudaFree(buffer);
assert(err == cudaSuccess);
}
virtual void* scratchpad() const {
if (scratch_ == NULL) {
scratch_ = allocate(kCudaScratchSize + sizeof(unsigned int));
}
return scratch_;
}
virtual unsigned int* semaphore() const {
if (semaphore_ == NULL) {
char* scratch = static_cast<char*>(scratchpad()) + kCudaScratchSize;
semaphore_ = reinterpret_cast<unsigned int*>(scratch);
cudaError_t err = cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_);
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
}
return semaphore_;
}
private:
const cudaStream_t* stream_;
int device_;
mutable void* scratch_;
mutable unsigned int* semaphore_;
};
struct GpuDevice {
// The StreamInterface is not owned: the caller is
// responsible for its initialization and eventual destruction.
explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) {
eigen_assert(stream);
}
explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) {
eigen_assert(stream);
}
// TODO(bsteiner): This is an internal API, we should not expose it.
EIGEN_STRONG_INLINE const cudaStream_t& stream() const {
return stream_->stream();
}
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
return stream_->allocate(num_bytes);
}
EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
stream_->deallocate(buffer);
}
EIGEN_STRONG_INLINE void* scratchpad() const {
return stream_->scratchpad();
}
EIGEN_STRONG_INLINE unsigned int* semaphore() const {
return stream_->semaphore();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
#ifndef __CUDA_ARCH__
cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice,
stream_->stream());
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
#else
eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
}
EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
cudaError_t err =
cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream());
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
}
EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
cudaError_t err =
cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream());
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
#ifndef __CUDA_ARCH__
cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream());
EIGEN_UNUSED_VARIABLE(err)
assert(err == cudaSuccess);
#else
eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
}
EIGEN_STRONG_INLINE size_t numThreads() const {
// FIXME
return 32;
}
EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
// FIXME
return 48*1024;
}
EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
// We won't try to take advantage of the l2 cache for the time being, and
// there is no l3 cache on cuda devices.
return firstLevelCacheSize();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
#if defined(__CUDACC__) && !defined(__CUDA_ARCH__)
cudaError_t err = cudaStreamSynchronize(stream_->stream());
if (err != cudaSuccess) {
std::cerr << "Error detected in CUDA stream: "
<< cudaGetErrorString(err)
<< std::endl;
assert(err == cudaSuccess);
}
#else
assert(false && "The default device should be used instead to generate kernel code");
#endif
}
EIGEN_STRONG_INLINE int getNumCudaMultiProcessors() const {
return stream_->deviceProperties().multiProcessorCount;
}
EIGEN_STRONG_INLINE int maxCudaThreadsPerBlock() const {
return stream_->deviceProperties().maxThreadsPerBlock;
}
EIGEN_STRONG_INLINE int maxCudaThreadsPerMultiProcessor() const {
return stream_->deviceProperties().maxThreadsPerMultiProcessor;
}
EIGEN_STRONG_INLINE int sharedMemPerBlock() const {
return stream_->deviceProperties().sharedMemPerBlock;
}
EIGEN_STRONG_INLINE int majorDeviceVersion() const {
return stream_->deviceProperties().major;
}
EIGEN_STRONG_INLINE int minorDeviceVersion() const {
return stream_->deviceProperties().minor;
}
EIGEN_STRONG_INLINE int maxBlocks() const {
return max_blocks_;
}
// This function checks if the CUDA runtime recorded an error for the
// underlying stream device.
inline bool ok() const {
#ifdef __CUDACC__
cudaError_t error = cudaStreamQuery(stream_->stream());
return (error == cudaSuccess) || (error == cudaErrorNotReady);
#else
return false;
#endif
}
private:
const StreamInterface* stream_;
int max_blocks_;
};
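// Usage sketch (illustrative; requires a CUDA toolchain and EIGEN_USE_GPU).
// `hptr` and `n` are assumed to be a host buffer and its element count.
//   Eigen::CudaStreamDevice stream;     // default stream on the current GPU
//   Eigen::GpuDevice device(&stream);
//   const size_t bytes = n * sizeof(float);
//   float* dptr = static_cast<float*>(device.allocate(bytes));
//   device.memcpyHostToDevice(dptr, hptr, bytes);
//   // ... evaluate tensor expressions with .device(device) = ... ;
//   device.synchronize();
//   device.deallocate(dptr);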
#define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
(kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
assert(cudaGetLastError() == cudaSuccess);
// FIXME: Should be device and kernel specific.
#ifdef __CUDACC__
static EIGEN_DEVICE_FUNC inline void setCudaSharedMemConfig(cudaSharedMemConfig config) {
#ifndef __CUDA_ARCH__
cudaError_t status = cudaDeviceSetSharedMemConfig(config);
EIGEN_UNUSED_VARIABLE(status)
assert(status == cudaSuccess);
#else
EIGEN_UNUSED_VARIABLE(config)
#endif
}
#endif
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
| 11,080 | 31.784024 | 112 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
namespace Eigen {
// Default device for the machine (typically a single cpu core)
struct DefaultDevice {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
return internal::aligned_malloc(num_bytes);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
internal::aligned_free(buffer);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
::memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
::memset(buffer, c, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
#ifndef __CUDA_ARCH__
// Running on the host CPU
return 1;
#else
// Running on a CUDA device
return 32;
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
#ifndef __CUDA_ARCH__
// Running on the host CPU
return l1CacheSize();
#else
// Running on a CUDA device, return the amount of shared memory available.
return 48*1024;
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
#ifndef __CUDA_ARCH__
// Running single threaded on the host CPU
return l3CacheSize();
#else
// Running on a CUDA device
return firstLevelCacheSize();
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
#ifndef __CUDA_ARCH__
// Running single threaded on the host CPU
// Should return an enum that encodes the ISA supported by the CPU
return 1;
#else
// Running on a CUDA device
return __CUDA_ARCH__ / 100;
#endif
}
};
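// Usage note (illustrative): DefaultDevice is what plain tensor assignments
// use, so
//   Eigen::Tensor<float, 1> a(8), b(8);
//   b.setConstant(1.0f);
//   a = b * 2.0f;                // evaluated on DefaultDevice
// behaves like a.device(Eigen::DefaultDevice()) = b * 2.0f;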
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
| 2,474 | 29.182927 | 109 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Mehdi Goli Codeplay Software Ltd.
// Ralph Potter Codeplay Software Ltd.
// Luke Iwanski Codeplay Software Ltd.
// Contact: <[email protected]>
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
namespace Eigen {
struct SyclDevice {
/// class members
/// sycl queue
mutable cl::sycl::queue m_queue;
  /// std::map is used to make sure that we create only one buffer per
  /// pointer. The lifespan of the buffer is tied to the lifespan of the
  /// SyclDevice. If a non-read-only pointer needs to be accessed on the
  /// host, it must be deallocated manually.
mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
/// creating device by using selector
template<typename dev_Selector> SyclDevice(dev_Selector s)
:
#ifdef EIGEN_EXCEPTIONS
m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
        } catch (const cl::sycl::exception& e) {
std::cout << e.what() << std::endl;
}
}
}))
#else
m_queue(cl::sycl::queue(s))
#endif
{}
// destructor
~SyclDevice() { deallocate_all(); }
template <typename T> void deallocate(T *p) const {
auto it = buffer_map.find(p);
if (it != buffer_map.end()) {
buffer_map.erase(it);
internal::aligned_free(p);
}
}
void deallocate_all() const {
std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
while (it!=buffer_map.end()) {
auto p=it->first;
buffer_map.erase(it);
internal::aligned_free(const_cast<void*>(p));
it=buffer_map.begin();
}
buffer_map.clear();
}
  /// Creation of a sycl accessor for a buffer. This function first tries to
  /// find the buffer in the buffer_map. If found, it gets the accessor from
  /// it; if not, it adds an entry by creating a sycl buffer for that
  /// particular pointer.
template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
using Type = cl::sycl::buffer<T, 1>;
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
[](void *dataMem) { delete static_cast<Type*>(dataMem); })));
(static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
return ret;
}
template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
}
/// allocating memory on the cpu
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
return internal::aligned_malloc(8);
}
// some runtime conditions that can be applied here
bool isDeviceSuitable() const { return true; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
::memcpy(dst, src, n);
}
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
memcpy(host_acc.get_pointer(), src, n);
}
  /// With the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
auto it = buffer_map.find(src);
if (it != buffer_map.end()) {
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
memcpy(dst,host_acc.get_pointer(), n);
} else{
eigen_assert("no device memory found. The memory might be destroyed before creation");
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
::memset(buffer, c, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
return 1;
}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
| 5,196 | 41.252033 | 214 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
namespace Eigen {
// Use the NonBlockingThreadPool by default. Define
// EIGEN_USE_SIMPLE_THREAD_POOL to fall back to the blocking SimpleThreadPool.
#ifndef EIGEN_USE_SIMPLE_THREAD_POOL
template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>;
typedef NonBlockingThreadPool ThreadPool;
#else
template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>;
typedef SimpleThreadPool ThreadPool;
#endif
// Barrier is an object that allows one or more threads to wait until
// Notify has been called a specified number of times.
class Barrier {
public:
Barrier(unsigned int count) : state_(count << 1), notified_(false) {
eigen_assert(((count << 1) >> 1) == count);
}
~Barrier() {
eigen_assert((state_>>1) == 0);
}
void Notify() {
unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
if (v != 1) {
eigen_assert(((v + 2) & ~1) != 0);
return; // either count has not dropped to 0, or waiter is not waiting
}
std::unique_lock<std::mutex> l(mu_);
eigen_assert(!notified_);
notified_ = true;
cv_.notify_all();
}
void Wait() {
unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
if ((v >> 1) == 0) return;
std::unique_lock<std::mutex> l(mu_);
while (!notified_) {
cv_.wait(l);
}
}
private:
std::mutex mu_;
std::condition_variable cv_;
std::atomic<unsigned int> state_; // low bit is waiter flag
bool notified_;
};
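// Usage sketch (illustrative): block until three scheduled tasks have each
// called Notify(). `pool` is assumed to be an Eigen::ThreadPool.
//   Eigen::Barrier barrier(3);
//   for (int i = 0; i < 3; ++i)
//     pool.Schedule([&barrier]() { /* ...work... */ barrier.Notify(); });
//   barrier.Wait();   // returns once the pending count reaches zero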
// Notification is an object that allows a user to wait for another
// thread to signal a notification that an event has occurred.
//
// Multiple threads can wait on the same Notification object,
// but only one caller must call Notify() on the object.
struct Notification : Barrier {
Notification() : Barrier(1) {};
};
// Runs an arbitrary function and then calls Notify() on the passed in
// Notification.
template <typename Function, typename... Args> struct FunctionWrapperWithNotification
{
static void run(Notification* n, Function f, Args... args) {
f(args...);
if (n) {
n->Notify();
}
}
};
template <typename Function, typename... Args> struct FunctionWrapperWithBarrier
{
static void run(Barrier* b, Function f, Args... args) {
f(args...);
if (b) {
b->Notify();
}
}
};
template <typename SyncType>
static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) {
if (n) {
n->Wait();
}
}
// Build a thread pool device on top of an existing pool of threads.
struct ThreadPoolDevice {
// The ownership of the thread pool remains with the caller.
ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores) : pool_(pool), num_threads_(num_cores) { }
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
return internal::aligned_malloc(num_bytes);
}
EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
internal::aligned_free(buffer);
}
EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
::memcpy(dst, src, n);
}
EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
::memset(buffer, c, n);
}
EIGEN_STRONG_INLINE int numThreads() const {
return num_threads_;
}
EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
return l1CacheSize();
}
EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
// The l3 cache size is shared between all the cores.
return l3CacheSize() / num_threads_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
// Should return an enum that encodes the ISA supported by the CPU
return 1;
}
template <class Function, class... Args>
EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, Args&&... args) const {
Notification* n = new Notification();
pool_->Schedule(std::bind(&FunctionWrapperWithNotification<Function, Args...>::run, n, f, args...));
return n;
}
template <class Function, class... Args>
EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b,
Function&& f,
Args&&... args) const {
pool_->Schedule(std::bind(
&FunctionWrapperWithBarrier<Function, Args...>::run, b, f, args...));
}
template <class Function, class... Args>
EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, Args&&... args) const {
pool_->Schedule(std::bind(f, args...));
}
// Returns a logical thread index between 0 and pool_->NumThreads() - 1 if
// called from one of the threads in pool_. Returns -1 otherwise.
EIGEN_STRONG_INLINE int currentThreadId() const {
return pool_->CurrentThreadId();
}
  // parallelFor executes f over the index range [0, n) in parallel and waits
  // for completion. f accepts a half-open interval [first, last).
  // The block size is chosen based on the iteration cost and the resulting
  // parallel efficiency. If block_align is not nullptr, it is called to
  // round up the block size.
void parallelFor(Index n, const TensorOpCost& cost,
std::function<Index(Index)> block_align,
std::function<void(Index, Index)> f) const {
typedef TensorCostModel<ThreadPoolDevice> CostModel;
if (n <= 1 || numThreads() == 1 ||
CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
f(0, n);
return;
}
    // Calculate block size based on (1) the iteration cost and (2) parallel
    // efficiency. We want blocks to be neither too small (to mitigate
    // parallelization overheads) nor too large (to mitigate tail effects and
    // potential load imbalance), and we also want the number of blocks to
    // divide evenly across threads.
double block_size_f = 1.0 / CostModel::taskSize(1, cost);
Index block_size = numext::mini(n, numext::maxi<Index>(1, block_size_f));
const Index max_block_size =
numext::mini(n, numext::maxi<Index>(1, 2 * block_size_f));
if (block_align) {
Index new_block_size = block_align(block_size);
eigen_assert(new_block_size >= block_size);
block_size = numext::mini(n, new_block_size);
}
Index block_count = divup(n, block_size);
// Calculate parallel efficiency as fraction of total CPU time used for
// computations:
double max_efficiency =
static_cast<double>(block_count) /
(divup<int>(block_count, numThreads()) * numThreads());
// Now try to increase block size up to max_block_size as long as it
// doesn't decrease parallel efficiency.
for (Index prev_block_count = block_count; prev_block_count > 1;) {
// This is the next block size that divides size into a smaller number
// of blocks than the current block_size.
Index coarser_block_size = divup(n, prev_block_count - 1);
if (block_align) {
Index new_block_size = block_align(coarser_block_size);
eigen_assert(new_block_size >= coarser_block_size);
coarser_block_size = numext::mini(n, new_block_size);
}
if (coarser_block_size > max_block_size) {
break; // Reached max block size. Stop.
}
// Recalculate parallel efficiency.
const Index coarser_block_count = divup(n, coarser_block_size);
eigen_assert(coarser_block_count < prev_block_count);
prev_block_count = coarser_block_count;
const double coarser_efficiency =
static_cast<double>(coarser_block_count) /
(divup<int>(coarser_block_count, numThreads()) * numThreads());
if (coarser_efficiency + 0.01 >= max_efficiency) {
// Taking it.
block_size = coarser_block_size;
block_count = coarser_block_count;
if (max_efficiency < coarser_efficiency) {
max_efficiency = coarser_efficiency;
}
}
}
// Recursively divide size into halves until we reach block_size.
// Division code rounds mid to block_size, so we are guaranteed to get
// block_count leaves that do actual computations.
Barrier barrier(static_cast<unsigned int>(block_count));
std::function<void(Index, Index)> handleRange;
handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) {
if (last - first <= block_size) {
// Single block or less, execute directly.
f(first, last);
barrier.Notify();
return;
}
// Split into halves and submit to the pool.
Index mid = first + divup((last - first) / 2, block_size) * block_size;
pool_->Schedule([=, &handleRange]() { handleRange(mid, last); });
pool_->Schedule([=, &handleRange]() { handleRange(first, mid); });
};
handleRange(0, n);
barrier.Wait();
}
// Convenience wrapper for parallelFor that does not align blocks.
void parallelFor(Index n, const TensorOpCost& cost,
std::function<void(Index, Index)> f) const {
parallelFor(n, cost, nullptr, std::move(f));
}
private:
ThreadPoolInterface* pool_;
int num_threads_;
};
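// Usage sketch (illustrative): doubling a buffer with parallelFor. The cost
// values are rough per-element estimates used to pick the block size.
//   Eigen::ThreadPool pool(8);
//   Eigen::ThreadPoolDevice device(&pool, 8);
//   std::vector<float> v(1 << 20, 1.0f);
//   Eigen::TensorOpCost cost(/*bytes_loaded=*/sizeof(float),
//                            /*bytes_stored=*/sizeof(float),
//                            /*compute_cycles=*/1);
//   device.parallelFor(v.size(), cost,
//                      [&v](Eigen::Index first, Eigen::Index last) {
//                        for (Eigen::Index i = first; i < last; ++i)
//                          v[i] *= 2.0f;
//                      });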
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
| 9,793 | 33.978571 | 104 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
namespace Eigen {
/** \internal
*
* \class TensorDimensionList
* \ingroup CXX11_Tensor_Module
*
* \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n.
*
* \sa Tensor
*/
template <typename Index, std::size_t Rank> struct DimensionList {
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
const Index operator[] (const Index i) const { return i; }
};
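// Usage sketch (illustrative): DimensionList simply maps i -> i, enumerating
// every dimension of a rank-Rank tensor, e.g. as the index list for a
// reduction over all dimensions.
//   Eigen::DimensionList<Eigen::DenseIndex, 3> all_dims;
//   // all_dims[0] == 0, all_dims[1] == 1, all_dims[2] == 2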
namespace internal {
template<typename Index, std::size_t Rank> struct array_size<DimensionList<Index, Rank> > {
static const size_t value = Rank;
};
template<typename Index, std::size_t Rank> struct array_size<const DimensionList<Index, Rank> > {
static const size_t value = Rank;
};
template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(DimensionList<Index, Rank>&) {
return n;
}
template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(const DimensionList<Index, Rank>&) {
return n;
}
#if EIGEN_HAS_CONSTEXPR
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
return true;
}
};
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
return true;
}
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<DimensionList<Index, Rank> > {
static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i == value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i == value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i != value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i != value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i > value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i > value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i < value;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return i < value;
}
};
#else
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
return true;
}
};
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
return true;
}
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
return true;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
return false;
}
};
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
| 7,674 | 31.383966 | 115 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
namespace Eigen {
/** \internal
*
* \class TensorDimensions
* \ingroup CXX11_Tensor_Module
*
* \brief Set of classes used to encode and store the dimensions of a Tensor.
*
* The Sizes class encodes as part of the type the number of dimensions and the
* sizes corresponding to each dimension. It uses no storage space since it is
* entirely known at compile time.
* The DSizes class is its dynamic sibling: the number of dimensions is known
* at compile time but the sizes are set during execution.
*
* \sa Tensor
*/
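// A minimal usage sketch (illustrative only; assumes the tensor headers are
// included and is not itself part of the library):
//
//   Eigen::Sizes<2, 3, 4> fixed;                       // rank and sizes fixed at compile time
//   // fixed.TotalSize() == 24, fixed[1] == 3
//   Eigen::DSizes<Eigen::DenseIndex, 3> dyn(2, 3, 4);  // rank fixed, sizes chosen at runtime
//   // dyn.TotalSize() == 24, dyn[1] == 3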
// Boilerplate code
namespace internal {
template<std::size_t n, typename Dimension> struct dget {
static const std::size_t value = get<n, Dimension>::value;
};
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
static inline Index run(array<Index, NumIndices> const& indices,
const Dimensions& dimensions)
{
return array_get<RowMajor ? n - 1 : (NumIndices - n)>(indices) +
dget<RowMajor ? n - 1 : (NumIndices - n), Dimensions>::value *
fixed_size_tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};
template<typename Index, std::size_t NumIndices, bool RowMajor>
struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
static inline Index run(array<Index, NumIndices> const&, const Dimensions&)
{
return 0;
}
};
template<typename Index, std::size_t n>
struct fixed_size_tensor_index_extraction_helper
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
static inline Index run(const Index index,
const Dimensions& dimensions)
{
const Index mult = (index == n-1) ? 1 : 0;
return array_get<n-1>(dimensions) * mult +
fixed_size_tensor_index_extraction_helper<Index, n - 1>::run(index, dimensions);
}
};
template<typename Index>
struct fixed_size_tensor_index_extraction_helper<Index, 0>
{
template <typename Dimensions> EIGEN_DEVICE_FUNC
static inline Index run(const Index,
const Dimensions&)
{
return 0;
}
};
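// The two helpers above evaluate dimensions[index] without dynamic indexing:
// for rank 3 the recursion expands (conceptually) to
//   d2 * (index == 2) + d1 * (index == 1) + d0 * (index == 0),
// so exactly one term survives.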
} // end namespace internal
// Fixed size
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::ptrdiff_t... Indices>
struct Sizes : internal::numeric_list<std::ptrdiff_t, Indices...> {
typedef internal::numeric_list<std::ptrdiff_t, Indices...> Base;
static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
return Base::count;
}
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t TotalSize() {
return internal::arg_prod(Indices...);
}
EIGEN_DEVICE_FUNC Sizes() { }
template <typename DenseIndex>
explicit EIGEN_DEVICE_FUNC Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
// todo: add assertion
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> EIGEN_DEVICE_FUNC Sizes(DenseIndex...) { }
explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list<std::ptrdiff_t> /*l*/) {
// todo: add assertion
}
#endif
template <typename T> Sizes& operator = (const T& /*other*/) {
// add assertion failure if the size of other is different
return *this;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
return internal::fixed_size_tensor_index_extraction_helper<std::ptrdiff_t, Base::count>::run(index, *this);
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *static_cast<const Base*>(this));
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *static_cast<const Base*>(this));
}
};
namespace internal {
template <typename std::ptrdiff_t... Indices>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<Indices...>&) {
return Sizes<Indices...>::total_size;
}
}
#else
template <std::size_t n>
struct non_zero_size {
typedef internal::type2val<std::size_t, n> type;
};
template <>
struct non_zero_size<0> {
typedef internal::null_type type;
};
template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
typedef typename internal::make_type_list<typename non_zero_size<V1>::type, typename non_zero_size<V2>::type, typename non_zero_size<V3>::type, typename non_zero_size<V4>::type, typename non_zero_size<V5>::type >::type Base;
static const size_t count = Base::count;
static const std::size_t total_size = internal::arg_prod<Base>::value;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
return count;
}
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
return internal::arg_prod<Base>::value;
}
Sizes() { }
template <typename DenseIndex>
explicit Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
// todo: add assertion
}
template <typename T> Sizes& operator = (const T& /*other*/) {
// add assertion failure if the size of other is different
return *this;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
explicit Sizes(std::initializer_list<std::size_t>) {
// todo: add assertion
}
#else
EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) {
}
EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) {
}
EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) {
}
EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
}
EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
}
#endif
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex operator[] (const int index) const {
switch (index) {
case 0:
return internal::get<0, Base>::value;
case 1:
return internal::get<1, Base>::value;
case 2:
return internal::get<2, Base>::value;
case 3:
return internal::get<3, Base>::value;
case 4:
return internal::get<4, Base>::value;
default:
eigen_assert(false && "index overflow");
return static_cast<DenseIndex>(-1);
}
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *reinterpret_cast<const Base*>(this));
}
template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *reinterpret_cast<const Base*>(this));
}
};
namespace internal {
template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
return Sizes<V1, V2, V3, V4, V5>::total_size;
}
}
#endif
// Boilerplate
namespace internal {
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct tensor_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const& dimensions)
{
return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
tensor_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};
template<typename Index, std::size_t NumIndices, bool RowMajor>
struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index run(array<Index, NumIndices> const& indices, array<Index, NumIndices> const&)
{
return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
}
};
} // end namespace internal
// Dynamic size
template <typename DenseIndex, int NumDims>
struct DSizes : array<DenseIndex, NumDims> {
typedef array<DenseIndex, NumDims> Base;
static const int count = NumDims;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
return NumDims;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const {
return (NumDims == 0) ? 1 : internal::array_prod(*static_cast<const Base*>(this));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() {
for (int i = 0 ; i < NumDims; ++i) {
(*this)[i] = 0;
}
}
EIGEN_DEVICE_FUNC explicit DSizes(const array<DenseIndex, NumDims>& a) : Base(a) { }
EIGEN_DEVICE_FUNC explicit DSizes(const DenseIndex i0) {
eigen_assert(NumDims == 1);
(*this)[0] = i0;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {
EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
}
#else
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) {
eigen_assert(NumDims == 2);
(*this)[0] = i0;
(*this)[1] = i1;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) {
eigen_assert(NumDims == 3);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) {
eigen_assert(NumDims == 4);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
(*this)[3] = i3;
}
EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) {
eigen_assert(NumDims == 5);
(*this)[0] = i0;
(*this)[1] = i1;
(*this)[2] = i2;
(*this)[3] = i3;
(*this)[4] = i4;
}
#endif
EIGEN_DEVICE_FUNC DSizes& operator = (const array<DenseIndex, NumDims>& other) {
*static_cast<Base*>(this) = other;
return *this;
}
// A constexpr would be so much better here
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfColMajor(const array<DenseIndex, NumDims>& indices) const {
return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, false>::run(indices, *static_cast<const Base*>(this));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array<DenseIndex, NumDims>& indices) const {
return internal::tensor_index_linearization_helper<DenseIndex, NumDims, NumDims - 1, true>::run(indices, *static_cast<const Base*>(this));
}
};
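// Worked example (illustrative): for a rank-3 DSizes with dimensions
// (d0, d1, d2) and coordinates (i0, i1, i2), the linearization helpers expand to
//   IndexOfColMajor -> i0 + d0 * (i1 + d1 * i2)
//   IndexOfRowMajor -> i2 + d2 * (i1 + d1 * i0)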
// Boilerplate
namespace internal {
template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
struct tensor_vsize_index_linearization_helper
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index run(array<Index, NumIndices> const& indices, std::vector<DenseIndex> const& dimensions)
{
return array_get<RowMajor ? n : (NumIndices - n - 1)>(indices) +
array_get<RowMajor ? n : (NumIndices - n - 1)>(dimensions) *
tensor_vsize_index_linearization_helper<Index, NumIndices, n - 1, RowMajor>::run(indices, dimensions);
}
};
template<typename Index, std::size_t NumIndices, bool RowMajor>
struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
{
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Index run(array<Index, NumIndices> const& indices, std::vector<DenseIndex> const&)
{
return array_get<RowMajor ? 0 : NumIndices - 1>(indices);
}
};
} // end namespace internal
namespace internal {
template <typename DenseIndex, int NumDims> struct array_size<const DSizes<DenseIndex, NumDims> > {
static const size_t value = NumDims;
};
template <typename DenseIndex, int NumDims> struct array_size<DSizes<DenseIndex, NumDims> > {
static const size_t value = NumDims;
};
#ifndef EIGEN_EMULATE_CXX11_META_H
template <typename std::ptrdiff_t... Indices> struct array_size<const Sizes<Indices...> > {
static const std::ptrdiff_t value = Sizes<Indices...>::count;
};
template <typename std::ptrdiff_t... Indices> struct array_size<Sizes<Indices...> > {
static const std::ptrdiff_t value = Sizes<Indices...>::count;
};
template <std::ptrdiff_t n, typename std::ptrdiff_t... Indices> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<Indices...>&) {
return get<n, internal::numeric_list<std::size_t, Indices...> >::value;
}
template <std::ptrdiff_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
eigen_assert(false && "should never be called");
return -1;
}
#else
template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
};
template <std::size_t n, std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
return get<n, typename Sizes<V1,V2,V3,V4,V5>::Base>::value;
}
#endif
template <typename Dims1, typename Dims2, size_t n, size_t m>
struct sizes_match_below_dim {
static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
return false;
}
};
template <typename Dims1, typename Dims2, size_t n>
struct sizes_match_below_dim<Dims1, Dims2, n, n> {
static EIGEN_DEVICE_FUNC inline bool run(Dims1& dims1, Dims2& dims2) {
return (array_get<n-1>(dims1) == array_get<n-1>(dims2)) &
sizes_match_below_dim<Dims1, Dims2, n-1, n-1>::run(dims1, dims2);
}
};
template <typename Dims1, typename Dims2>
struct sizes_match_below_dim<Dims1, Dims2, 0, 0> {
static EIGEN_DEVICE_FUNC inline bool run(Dims1&, Dims2&) {
return true;
}
};
} // end namespace internal
template <typename Dims1, typename Dims2>
EIGEN_DEVICE_FUNC bool dimensions_match(Dims1& dims1, Dims2& dims2) {
return internal::sizes_match_below_dim<Dims1, Dims2, internal::array_size<Dims1>::value, internal::array_size<Dims2>::value>::run(dims1, dims2);
}
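// Example (illustrative): with DSizes<DenseIndex, 2> a(2, 3), b(2, 3), c(3, 2),
// dimensions_match(a, b) is true and dimensions_match(a, c) is false; dimension
// lists of different ranks fall through to the primary sizes_match_below_dim
// template, which returns false.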
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H
| 15,537 | 35.219114 | 226 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
namespace Eigen {
/** \class TensorEvalTo
* \ingroup CXX11_Tensor_Module
*
* \brief Expression node that evaluates the wrapped expression into a
* user-supplied buffer as a side effect of the evaluation pass.
*
*/
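// Usage sketch (illustrative; this node is normally constructed by the library
// itself rather than by end users; `expr` and `total_size` are assumed to be
// defined elsewhere):
//
//   float* buffer = static_cast<float*>(device.allocate(total_size * sizeof(float)));
//   TensorEvalToOp<const decltype(expr)> eval_to(buffer, expr);
//   // An executor then drives the evaluator below, whose evalScalar()/
//   // evalPacket() store expr's coefficients into `buffer`.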
namespace internal {
template<typename XprType, template <class> class MakePointer_>
struct traits<TensorEvalToOp<XprType, MakePointer_> >
{
// Traits are simply forwarded from the nested expression type.
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum {
Flags = 0
};
template <class T>
struct MakePointer {
// Intermediate typedef to workaround MSVC issue.
typedef MakePointer_<T> MakePointerT;
typedef typename MakePointerT::Type Type;
};
};
template<typename XprType, template <class> class MakePointer_>
struct eval<TensorEvalToOp<XprType, MakePointer_>, Eigen::Dense>
{
typedef const TensorEvalToOp<XprType, MakePointer_>& type;
};
template<typename XprType, template <class> class MakePointer_>
struct nested<TensorEvalToOp<XprType, MakePointer_>, 1, typename eval<TensorEvalToOp<XprType, MakePointer_> >::type>
{
typedef TensorEvalToOp<XprType, MakePointer_> type;
};
} // end namespace internal
template<typename XprType, template <class> class MakePointer_>
class TensorEvalToOp : public TensorBase<TensorEvalToOp<XprType, MakePointer_>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorEvalToOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename MakePointer_<CoeffReturnType>::Type PointerType;
typedef typename Eigen::internal::nested<TensorEvalToOp>::type Nested;
typedef typename Eigen::internal::traits<TensorEvalToOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorEvalToOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(PointerType buffer, const XprType& expr)
: m_xpr(expr), m_buffer(buffer) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
EIGEN_DEVICE_FUNC PointerType buffer() const { return m_buffer; }
protected:
typename XprType::Nested m_xpr;
PointerType m_buffer;
};
template<typename ArgType, typename Device, template <class> class MakePointer_>
struct TensorEvaluator<const TensorEvalToOp<ArgType, MakePointer_>, Device>
{
typedef TensorEvalToOp<ArgType, MakePointer_> XprType;
typedef typename ArgType::Scalar Scalar;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
typedef typename XprType::Index Index;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = true
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_device(device),
m_buffer(op.buffer()), m_op(op), m_expression(op.expression())
{ }
// Used for accessor extraction in SYCL Managed TensorMap:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& op() const {
return m_op;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~TensorEvaluator() {
}
typedef typename internal::traits<const TensorEvalToOp<ArgType, MakePointer_> >::template MakePointer<CoeffReturnType>::Type DevicePointer;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(DevicePointer scalar) {
EIGEN_UNUSED_VARIABLE(scalar);
eigen_assert(scalar == NULL);
return m_impl.evalSubExprsIfNeeded(m_buffer);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
m_buffer[i] = m_impl.coeff(i);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
internal::pstoret<CoeffReturnType, PacketReturnType, Aligned>(m_buffer + i, m_impl.template packet<TensorEvaluator<ArgType, Device>::IsAligned ? Aligned : Unaligned>(i));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_buffer[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
// We assume that evalPacket or evalScalar is called to perform the
// assignment and account for the cost of the write here.
return m_impl.costPerCoeff(vectorized) +
TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC DevicePointer data() const { return m_buffer; }
ArgType expression() const { return m_expression; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
/// added for sycl in order to construct the buffer from the sycl device
const Device& device() const{return m_device;}
private:
TensorEvaluator<ArgType, Device> m_impl;
const Device& m_device;
DevicePointer m_buffer;
const XprType& m_op;
const ArgType m_expression;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
| 6,560 | 35.049451 | 174 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
namespace Eigen {
/** \class TensorEvaluator
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor evaluator classes.
*
* These classes are responsible for the evaluation of the tensor expression.
*
* TODO: add support for more types of expressions, in particular expressions
* leading to lvalues (slicing, reshaping, etc...)
*/
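// Evaluation protocol sketch (illustrative): callers such as the executors
// drive an evaluator roughly as follows (`use` is a placeholder):
//
//   TensorEvaluator<const Expr, DefaultDevice> eval(expr, device);
//   if (eval.evalSubExprsIfNeeded(NULL)) {  // true => coefficients must be pulled
//     for (Index i = 0; i < array_prod(eval.dimensions()); ++i)
//       use(eval.coeff(i));                 // or eval.template packet<LoadMode>(i)
//   }
//   eval.cleanup();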
// Generic evaluator
template<typename Derived, typename Device>
struct TensorEvaluator
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename Derived::Dimensions Dimensions;
// NumDimensions is -1 for variable dim tensors
static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
internal::traits<Derived>::NumDimensions : 0;
enum {
IsAligned = Derived::IsAligned,
PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
Layout = Derived::Layout,
CoordAccess = NumCoords > 0,
RawAccess = true
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
: m_data(const_cast<typename internal::traits<Derived>::template MakePointer<Scalar>::Type>(m.data())), m_dims(m.dimensions()), m_device(device), m_impl(m)
{ }
// Used for accessor extraction in SYCL Managed TensorMap:
const Derived& derived() const { return m_impl; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* dest) {
if (dest) {
m_device.memcpy((void*)dest, m_data, sizeof(Scalar) * m_dims.TotalSize());
return false;
}
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
eigen_assert(m_data);
return m_data[index];
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
eigen_assert(m_data);
return m_data[index];
}
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketReturnType packet(Index index) const
{
return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
}
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
return internal::pstoret<Scalar, PacketReturnType, StoreMode>(m_data + index, x);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
eigen_assert(m_data);
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
return m_data[m_dims.IndexOfColMajor(coords)];
} else {
return m_data[m_dims.IndexOfRowMajor(coords)];
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<DenseIndex, NumCoords>& coords) {
eigen_assert(m_data);
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
return m_data[m_dims.IndexOfColMajor(coords)];
} else {
return m_data[m_dims.IndexOfRowMajor(coords)];
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
internal::unpacket_traits<PacketReturnType>::size);
}
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<Scalar>::Type data() const { return m_data; }
/// required by sycl in order to construct sycl buffer from raw pointer
const Device& device() const{return m_device;}
protected:
typename internal::traits<Derived>::template MakePointer<Scalar>::Type m_data;
Dimensions m_dims;
const Device& m_device;
const Derived& m_impl;
};
namespace {
template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T loadConstant(const T* address) {
return *address;
}
// Use the texture cache on CUDA devices whenever possible
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float loadConstant(const float* address) {
return __ldg(address);
}
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
double loadConstant(const double* address) {
return __ldg(address);
}
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
Eigen::half loadConstant(const Eigen::half* address) {
return Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x)));
}
#endif
}
// Default evaluator for rvalues
template<typename Derived, typename Device>
struct TensorEvaluator<const Derived, Device>
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename Derived::Dimensions Dimensions;
// NumDimensions is -1 for variable dim tensors
static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
internal::traits<Derived>::NumDimensions : 0;
enum {
IsAligned = Derived::IsAligned,
PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
Layout = Derived::Layout,
CoordAccess = NumCoords > 0,
RawAccess = true
};
// Used for accessor extraction in SYCL Managed TensorMap:
const Derived& derived() const { return m_impl; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
: m_data(m.data()), m_dims(m.dimensions()), m_device(device), m_impl(m)
{ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data) {
m_device.memcpy((void*)data, m_data, m_dims.TotalSize() * sizeof(Scalar));
return false;
}
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
eigen_assert(m_data);
return loadConstant(m_data+index);
}
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
PacketReturnType packet(Index index) const
{
return internal::ploadt_ro<PacketReturnType, LoadMode>(m_data + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
eigen_assert(m_data);
const Index index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_dims.IndexOfColMajor(coords)
: m_dims.IndexOfRowMajor(coords);
return loadConstant(m_data+index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
internal::unpacket_traits<PacketReturnType>::size);
}
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::template MakePointer<const Scalar>::Type data() const { return m_data; }
/// added for sycl in order to construct the buffer from the sycl device
const Device& device() const{return m_device;}
protected:
typename internal::traits<Derived>::template MakePointer<const Scalar>::Type m_data;
Dimensions m_dims;
const Device& m_device;
const Derived& m_impl;
};
// -------------------- CwiseNullaryOp --------------------
template<typename NullaryOp, typename ArgType, typename Device>
struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
{
typedef TensorCwiseNullaryOp<NullaryOp, ArgType> XprType;
enum {
IsAligned = true,
PacketAccess = internal::functor_traits<NullaryOp>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC
TensorEvaluator(const XprType& op, const Device& device)
: m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper()
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { return true; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_wrapper(m_functor, index);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return m_wrapper.template packetOp<PacketReturnType, Index>(m_functor, index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
internal::unpacket_traits<PacketReturnType>::size);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_argImpl; }
/// required by sycl in order to extract the accessor
NullaryOp functor() const { return m_functor; }
private:
const NullaryOp m_functor;
TensorEvaluator<ArgType, Device> m_argImpl;
const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
};
// -------------------- CwiseUnaryOp --------------------
template<typename UnaryOp, typename ArgType, typename Device>
struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
{
typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;
enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess & internal::functor_traits<UnaryOp>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
: m_functor(op.functor()),
m_argImpl(op.nestedExpression(), device)
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
m_argImpl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_argImpl.cleanup();
}
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_functor(m_argImpl.coeff(index));
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
const double functor_cost = internal::functor_traits<UnaryOp>::Cost;
return m_argImpl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ArgType, Device> & impl() const { return m_argImpl; }
/// added for sycl in order to construct the buffer from sycl device
UnaryOp functor() const { return m_functor; }
private:
const UnaryOp m_functor;
TensorEvaluator<ArgType, Device> m_argImpl;
};
// -------------------- CwiseBinaryOp --------------------
template<typename BinaryOp, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType>, Device>
{
typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;
enum {
IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess &
internal::functor_traits<BinaryOp>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
: m_functor(op.functor()),
m_leftImpl(op.lhsExpression(), device),
m_rightImpl(op.rhsExpression(), device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
typedef typename TensorEvaluator<LeftArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// TODO: use right impl instead if right impl dimensions are known at compile time.
return m_leftImpl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
m_leftImpl.evalSubExprsIfNeeded(NULL);
m_rightImpl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_leftImpl.cleanup();
m_rightImpl.cleanup();
}
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index));
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index), m_rightImpl.template packet<LoadMode>(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double functor_cost = internal::functor_traits<BinaryOp>::Cost;
return m_leftImpl.costPerCoeff(vectorized) +
m_rightImpl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<LeftArgType, Device>& left_impl() const { return m_leftImpl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<RightArgType, Device>& right_impl() const { return m_rightImpl; }
/// required by sycl in order to extract the accessor
BinaryOp functor() const { return m_functor; }
private:
const BinaryOp m_functor;
TensorEvaluator<LeftArgType, Device> m_leftImpl;
TensorEvaluator<RightArgType, Device> m_rightImpl;
};
// -------------------- CwiseTernaryOp --------------------
template<typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type, typename Device>
struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type>, Device>
{
typedef TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type> XprType;
enum {
IsAligned = TensorEvaluator<Arg1Type, Device>::IsAligned & TensorEvaluator<Arg2Type, Device>::IsAligned & TensorEvaluator<Arg3Type, Device>::IsAligned,
PacketAccess = TensorEvaluator<Arg1Type, Device>::PacketAccess & TensorEvaluator<Arg2Type, Device>::PacketAccess & TensorEvaluator<Arg3Type, Device>::PacketAccess &
internal::functor_traits<TernaryOp>::PacketAccess,
Layout = TensorEvaluator<Arg1Type, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
: m_functor(op.functor()),
m_arg1Impl(op.arg1Expression(), device),
m_arg2Impl(op.arg2Expression(), device),
m_arg3Impl(op.arg3Expression(), device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<Arg1Type, Device>::Layout) == static_cast<int>(TensorEvaluator<Arg3Type, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
typename internal::traits<Arg2Type>::StorageKind>::value),
STORAGE_KIND_MUST_MATCH)
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
typename internal::traits<Arg3Type>::StorageKind>::value),
STORAGE_KIND_MUST_MATCH)
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
typename internal::traits<Arg2Type>::Index>::value),
STORAGE_INDEX_MUST_MATCH)
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
typename internal::traits<Arg3Type>::Index>::value),
STORAGE_INDEX_MUST_MATCH)
eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) && dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions()));
}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
typedef typename TensorEvaluator<Arg1Type, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// TODO: use arg2 or arg3 dimensions if they are known at compile time.
return m_arg1Impl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
m_arg1Impl.evalSubExprsIfNeeded(NULL);
m_arg2Impl.evalSubExprsIfNeeded(NULL);
m_arg3Impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_arg1Impl.cleanup();
m_arg2Impl.cleanup();
m_arg3Impl.cleanup();
}
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return m_functor.packetOp(m_arg1Impl.template packet<LoadMode>(index),
m_arg2Impl.template packet<LoadMode>(index),
m_arg3Impl.template packet<LoadMode>(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double functor_cost = internal::functor_traits<TernaryOp>::Cost;
return m_arg1Impl.costPerCoeff(vectorized) +
m_arg2Impl.costPerCoeff(vectorized) +
m_arg3Impl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<Arg1Type, Device> & arg1Impl() const { return m_arg1Impl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<Arg2Type, Device>& arg2Impl() const { return m_arg2Impl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<Arg3Type, Device>& arg3Impl() const { return m_arg3Impl; }
private:
const TernaryOp m_functor;
TensorEvaluator<Arg1Type, Device> m_arg1Impl;
TensorEvaluator<Arg2Type, Device> m_arg2Impl;
TensorEvaluator<Arg3Type, Device> m_arg3Impl;
};
// -------------------- SelectOp --------------------
template<typename IfArgType, typename ThenArgType, typename ElseArgType, typename Device>
struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>, Device>
{
typedef TensorSelectOp<IfArgType, ThenArgType, ElseArgType> XprType;
typedef typename XprType::Scalar Scalar;
enum {
IsAligned = TensorEvaluator<ThenArgType, Device>::IsAligned & TensorEvaluator<ElseArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ThenArgType, Device>::PacketAccess & TensorEvaluator<ElseArgType, Device>::PacketAccess &
internal::packet_traits<Scalar>::HasBlend,
Layout = TensorEvaluator<IfArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
: m_condImpl(op.ifExpression(), device),
m_thenImpl(op.thenExpression(), device),
m_elseImpl(op.elseExpression(), device)
{
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ThenArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ElseArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions()));
eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions()));
}
typedef typename XprType::Index Index;
typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
typedef typename TensorEvaluator<IfArgType, Device>::Dimensions Dimensions;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// TODO: use then or else impl instead if they happen to be known at compile time.
return m_condImpl.dimensions();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
m_condImpl.evalSubExprsIfNeeded(NULL);
m_thenImpl.evalSubExprsIfNeeded(NULL);
m_elseImpl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_condImpl.cleanup();
m_thenImpl.cleanup();
m_elseImpl.cleanup();
}
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
{
internal::Selector<PacketSize> select;
for (Index i = 0; i < PacketSize; ++i) {
select.select[i] = m_condImpl.coeff(index+i);
}
return internal::pblend(select,
m_thenImpl.template packet<LoadMode>(index),
m_elseImpl.template packet<LoadMode>(index));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
return m_condImpl.costPerCoeff(vectorized) +
m_thenImpl.costPerCoeff(vectorized)
.cwiseMax(m_elseImpl.costPerCoeff(vectorized));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return NULL; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<IfArgType, Device> & cond_impl() const { return m_condImpl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ThenArgType, Device>& then_impl() const { return m_thenImpl; }
/// required by sycl in order to extract the accessor
const TensorEvaluator<ElseArgType, Device>& else_impl() const { return m_elseImpl; }
private:
TensorEvaluator<IfArgType, Device> m_condImpl;
TensorEvaluator<ThenArgType, Device> m_thenImpl;
TensorEvaluator<ElseArgType, Device> m_elseImpl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
| 25,305 | 38.914826 | 238 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
namespace Eigen {
/** \class TensorExecutor
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor executor class.
*
* This class is responsible for launching the evaluation of the expression on
* the specified computing device.
*/
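// Dispatch sketch (illustrative): an assignment such as `lhs = rhs` is turned
// into an expression node and handed to the executor matching the device:
//
//   typedef TensorAssignOp<LhsXpr, const RhsXpr> Assign;
//   Assign assign(lhs, rhs);
//   internal::TensorExecutor<const Assign, DefaultDevice,
//                            /*Vectorizable=*/true>::run(assign, DefaultDevice());
//
// The Device template argument selects one of the specializations below
// (single thread, thread pool, GPU, or SYCL).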
namespace internal {
// Default strategy: the expression is evaluated with a single cpu thread.
template<typename Expression, typename Device, bool Vectorizable>
class TensorExecutor
{
public:
typedef typename Expression::Index Index;
EIGEN_DEVICE_FUNC
static inline void run(const Expression& expr, const Device& device = Device())
{
TensorEvaluator<Expression, Device> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign)
{
const Index size = array_prod(evaluator.dimensions());
for (Index i = 0; i < size; ++i) {
evaluator.evalScalar(i);
}
}
evaluator.cleanup();
}
};
template<typename Expression>
class TensorExecutor<Expression, DefaultDevice, true>
{
public:
typedef typename Expression::Index Index;
EIGEN_DEVICE_FUNC
static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice())
{
TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign)
{
const Index size = array_prod(evaluator.dimensions());
const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size;
// Give the compiler a strong hint to unroll the loop. But don't insist
// on unrolling, because if the function is expensive the compiler should not
// unroll the loop at the expense of inlining.
const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize;
for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) {
for (Index j = 0; j < 4; j++) {
evaluator.evalPacket(i + j * PacketSize);
}
}
const Index VectorizedSize = (size / PacketSize) * PacketSize;
for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) {
evaluator.evalPacket(i);
}
for (Index i = VectorizedSize; i < size; ++i) {
evaluator.evalScalar(i);
}
}
evaluator.cleanup();
}
};
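// Worked example (illustrative): with size == 103 and PacketSize == 4,
// UnrolledSize == (103 / 16) * 16 == 96 (six iterations of four packets),
// VectorizedSize == (103 / 4) * 4 == 100 (one trailing packet at i == 96),
// and the last three coefficients (100..102) are evaluated as scalars.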
// Multicore strategy: the index space is partitioned and each partition is executed on a single core
#ifdef EIGEN_USE_THREADS
template <typename Evaluator, typename Index, bool Vectorizable>
struct EvalRange {
static void run(Evaluator* evaluator_in, const Index first, const Index last) {
Evaluator evaluator = *evaluator_in;
eigen_assert(last >= first);
for (Index i = first; i < last; ++i) {
evaluator.evalScalar(i);
}
}
static Index alignBlockSize(Index size) {
return size;
}
};
template <typename Evaluator, typename Index>
struct EvalRange<Evaluator, Index, true> {
static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
static void run(Evaluator* evaluator_in, const Index first, const Index last) {
Evaluator evaluator = *evaluator_in;
eigen_assert(last >= first);
Index i = first;
if (last - first >= PacketSize) {
eigen_assert(first % PacketSize == 0);
Index last_chunk_offset = last - 4 * PacketSize;
// Give the compiler a strong hint to unroll the loop. But don't insist
// on unrolling, because if the function is expensive the compiler should not
// unroll the loop at the expense of inlining.
for (; i <= last_chunk_offset; i += 4*PacketSize) {
for (Index j = 0; j < 4; j++) {
evaluator.evalPacket(i + j * PacketSize);
}
}
last_chunk_offset = last - PacketSize;
for (; i <= last_chunk_offset; i += PacketSize) {
evaluator.evalPacket(i);
}
}
for (; i < last; ++i) {
evaluator.evalScalar(i);
}
}
static Index alignBlockSize(Index size) {
// Align block size to packet size and account for unrolling in run above.
if (size >= 16 * PacketSize) {
return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1);
}
// Aligning to 4 * PacketSize would increase block size by more than 25%.
return (size + PacketSize - 1) & ~(PacketSize - 1);
}
};
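// Worked example (illustrative): with PacketSize == 4, a block of size 70
// (>= 16 * PacketSize) is rounded up to a multiple of 4 * PacketSize:
// (70 + 15) & ~15 == 80; a block of size 10 is only rounded to a multiple of
// PacketSize: (10 + 3) & ~3 == 12.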
template <typename Expression, bool Vectorizable>
class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> {
public:
typedef typename Expression::Index Index;
static inline void run(const Expression& expr, const ThreadPoolDevice& device)
{
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
Evaluator evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign)
{
const Index size = array_prod(evaluator.dimensions());
#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize,
[&evaluator](Index first, Index last) {
EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, first, last);
});
#else
size_t num_threads = device.numThreads();
if (num_threads > 1) {
num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
size, evaluator.costPerCoeff(Vectorizable), num_threads);
}
if (num_threads == 1) {
EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size);
} else {
const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
Index blocksz = std::ceil<Index>(static_cast<float>(size)/num_threads) + PacketSize - 1;
const Index blocksize = numext::maxi<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
const Index numblocks = size / blocksize;
Barrier barrier(numblocks);
for (int i = 0; i < numblocks; ++i) {
device.enqueue_with_barrier(
&barrier, &EvalRange<Evaluator, Index, Vectorizable>::run,
&evaluator, i * blocksize, (i + 1) * blocksize);
}
if (numblocks * blocksize < size) {
EvalRange<Evaluator, Index, Vectorizable>::run(
&evaluator, numblocks * blocksize, size);
}
barrier.Wait();
}
#endif // defined(!EIGEN_USE_SIMPLE_THREAD_POOL)
}
evaluator.cleanup();
}
};
#endif // EIGEN_USE_THREADS
// GPU: the evaluation of the expression is offloaded to a GPU.
#if defined(EIGEN_USE_GPU)
template <typename Expression, bool Vectorizable>
class TensorExecutor<Expression, GpuDevice, Vectorizable> {
public:
typedef typename Expression::Index Index;
static void run(const Expression& expr, const GpuDevice& device);
};
#if defined(__CUDACC__)
template <typename Evaluator, typename Index, bool Vectorizable>
struct EigenMetaKernelEval {
static __device__ EIGEN_ALWAYS_INLINE
void run(Evaluator& eval, Index first, Index last, Index step_size) {
for (Index i = first; i < last; i += step_size) {
eval.evalScalar(i);
}
}
};
template <typename Evaluator, typename Index>
struct EigenMetaKernelEval<Evaluator, Index, true> {
static __device__ EIGEN_ALWAYS_INLINE
void run(Evaluator& eval, Index first, Index last, Index step_size) {
const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
const Index vectorized_size = (last / PacketSize) * PacketSize;
const Index vectorized_step_size = step_size * PacketSize;
// Use the vector path
for (Index i = first * PacketSize; i < vectorized_size;
i += vectorized_step_size) {
eval.evalPacket(i);
}
for (Index i = vectorized_size + first; i < last; i += step_size) {
eval.evalScalar(i);
}
}
};
template <typename Evaluator, typename Index>
__global__ void
__launch_bounds__(1024)
EigenMetaKernel(Evaluator eval, Index size) {
const Index first_index = blockIdx.x * blockDim.x + threadIdx.x;
const Index step_size = blockDim.x * gridDim.x;
const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned;
EigenMetaKernelEval<Evaluator, Index, vectorizable>::run(eval, first_index, size, step_size);
}
/*static*/
template <typename Expression, bool Vectorizable>
inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run(
const Expression& expr, const GpuDevice& device) {
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign) {
const int block_size = device.maxCudaThreadsPerBlock();
const int max_blocks = device.getNumCudaMultiProcessors() *
device.maxCudaThreadsPerMultiProcessor() / block_size;
const Index size = array_prod(evaluator.dimensions());
// Create at least one block to ensure we won't crash when tensorflow calls with tensors of size 0.
const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1);
LAUNCH_CUDA_KERNEL(
(EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>),
num_blocks, block_size, 0, device, evaluator, size);
}
evaluator.cleanup();
}
#endif // __CUDACC__
#endif // EIGEN_USE_GPU
// SYCL Executor policy
#ifdef EIGEN_USE_SYCL
template <typename Expression, bool Vectorizable>
class TensorExecutor<Expression, SyclDevice, Vectorizable> {
public:
static inline void run(const Expression &expr, const SyclDevice &device) {
// call TensorSYCL module
TensorSycl::run(expr, device);
}
};
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
| 10,248 | 34.463668 | 122 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
namespace Eigen {
/** \class TensorExpr
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor expression classes.
*
* The TensorCwiseNullaryOp class applies a nullary operator to an expression.
* This is typically used to generate constants.
*
* The TensorCwiseUnaryOp class represents an expression where a unary operator
* (e.g. cwiseSqrt) is applied to an expression.
*
* The TensorCwiseBinaryOp class represents an expression where a binary
* operator (e.g. addition) is applied to a lhs and a rhs expression.
*
*/
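// Expression examples (illustrative; `a` and `b` are tensors with matching
// dimensions):
//
//   auto c0 = a.constant(1.0f);  // TensorCwiseNullaryOp
//   auto c1 = a.sqrt();          // TensorCwiseUnaryOp
//   auto c2 = a + b;             // TensorCwiseBinaryOp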
namespace internal {
template<typename NullaryOp, typename XprType>
struct traits<TensorCwiseNullaryOp<NullaryOp, XprType> >
: traits<XprType>
{
typedef traits<XprType> XprTraits;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::Nested XprTypeNested;
typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum {
Flags = 0
};
};
} // end namespace internal
template<typename NullaryOp, typename XprType>
class TensorCwiseNullaryOp : public TensorBase<TensorCwiseNullaryOp<NullaryOp, XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef TensorCwiseNullaryOp<NullaryOp, XprType> Nested;
typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorCwiseNullaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseNullaryOp(const XprType& xpr, const NullaryOp& func = NullaryOp())
: m_xpr(xpr), m_functor(func) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
nestedExpression() const { return m_xpr; }
EIGEN_DEVICE_FUNC
const NullaryOp& functor() const { return m_functor; }
protected:
typename XprType::Nested m_xpr;
const NullaryOp m_functor;
};
namespace internal {
template<typename UnaryOp, typename XprType>
struct traits<TensorCwiseUnaryOp<UnaryOp, XprType> >
: traits<XprType>
{
// TODO(phli): Add InputScalar, InputPacket. Check references to
// current Scalar/Packet to see if the intent is Input or Output.
typedef typename result_of<UnaryOp(typename XprType::Scalar)>::type Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprType::Nested XprTypeNested;
typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template<typename UnaryOp, typename XprType>
struct eval<TensorCwiseUnaryOp<UnaryOp, XprType>, Eigen::Dense>
{
typedef const TensorCwiseUnaryOp<UnaryOp, XprType>& type;
};
template<typename UnaryOp, typename XprType>
struct nested<TensorCwiseUnaryOp<UnaryOp, XprType>, 1, typename eval<TensorCwiseUnaryOp<UnaryOp, XprType> >::type>
{
typedef TensorCwiseUnaryOp<UnaryOp, XprType> type;
};
} // end namespace internal
template<typename UnaryOp, typename XprType>
class TensorCwiseUnaryOp : public TensorBase<TensorCwiseUnaryOp<UnaryOp, XprType>, ReadOnlyAccessors>
{
public:
// TODO(phli): Add InputScalar, InputPacket. Check references to
// current Scalar/Packet to see if the intent is Input or Output.
typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef Scalar CoeffReturnType;
typedef typename Eigen::internal::nested<TensorCwiseUnaryOp>::type Nested;
typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorCwiseUnaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
: m_xpr(xpr), m_functor(func) {}
EIGEN_DEVICE_FUNC
const UnaryOp& functor() const { return m_functor; }
/** \returns the nested expression */
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
nestedExpression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
const UnaryOp m_functor;
};
namespace internal {
template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs
// are different.
// TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to
// current Scalar/Packet to see if the intent is Inputs or Output.
typedef typename result_of<
BinaryOp(typename LhsXprType::Scalar,
typename RhsXprType::Scalar)>::type Scalar;
typedef traits<LhsXprType> XprTraits;
typedef typename promote_storage_type<
typename traits<LhsXprType>::StorageKind,
typename traits<RhsXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<
typename traits<LhsXprType>::Index,
typename traits<RhsXprType>::Index>::type Index;
typedef typename LhsXprType::Nested LhsNested;
typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum {
Flags = 0
};
};
template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
struct eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, Eigen::Dense>
{
typedef const TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>& type;
};
template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
struct nested<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, 1, typename eval<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >::type>
{
typedef TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> type;
};
} // end namespace internal
template<typename BinaryOp, typename LhsXprType, typename RhsXprType>
class TensorCwiseBinaryOp : public TensorBase<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType>, ReadOnlyAccessors>
{
public:
// TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to
// current Scalar/Packet to see if the intent is Inputs or Output.
typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef Scalar CoeffReturnType;
typedef typename Eigen::internal::nested<TensorCwiseBinaryOp>::type Nested;
typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorCwiseBinaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp())
: m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {}
EIGEN_DEVICE_FUNC
const BinaryOp& functor() const { return m_functor; }
/** \returns the nested expressions */
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename LhsXprType::Nested>::type&
lhsExpression() const { return m_lhs_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename RhsXprType::Nested>::type&
rhsExpression() const { return m_rhs_xpr; }
protected:
typename LhsXprType::Nested m_lhs_xpr;
typename RhsXprType::Nested m_rhs_xpr;
const BinaryOp m_functor;
};
namespace internal {
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
struct traits<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> >
{
// Type promotion to handle the case where the types of the args are different.
typedef typename result_of<
TernaryOp(typename Arg1XprType::Scalar,
typename Arg2XprType::Scalar,
typename Arg3XprType::Scalar)>::type Scalar;
typedef traits<Arg1XprType> XprTraits;
typedef typename traits<Arg1XprType>::StorageKind StorageKind;
typedef typename traits<Arg1XprType>::Index Index;
typedef typename Arg1XprType::Nested Arg1Nested;
typedef typename Arg2XprType::Nested Arg2Nested;
typedef typename Arg3XprType::Nested Arg3Nested;
typedef typename remove_reference<Arg1Nested>::type _Arg1Nested;
typedef typename remove_reference<Arg2Nested>::type _Arg2Nested;
typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum {
Flags = 0
};
};
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
struct eval<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, Eigen::Dense>
{
typedef const TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>& type;
};
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
struct nested<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, 1, typename eval<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> >::type>
{
typedef TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType> type;
};
} // end namespace internal
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType>
class TensorCwiseTernaryOp : public TensorBase<TensorCwiseTernaryOp<TernaryOp, Arg1XprType, Arg2XprType, Arg3XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef Scalar CoeffReturnType;
typedef typename Eigen::internal::nested<TensorCwiseTernaryOp>::type Nested;
typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorCwiseTernaryOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseTernaryOp(const Arg1XprType& arg1, const Arg2XprType& arg2, const Arg3XprType& arg3, const TernaryOp& func = TernaryOp())
: m_arg1_xpr(arg1), m_arg2_xpr(arg2), m_arg3_xpr(arg3), m_functor(func) {}
EIGEN_DEVICE_FUNC
const TernaryOp& functor() const { return m_functor; }
/** \returns the nested expressions */
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename Arg1XprType::Nested>::type&
arg1Expression() const { return m_arg1_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename Arg2XprType::Nested>::type&
arg2Expression() const { return m_arg2_xpr; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename Arg3XprType::Nested>::type&
arg3Expression() const { return m_arg3_xpr; }
protected:
typename Arg1XprType::Nested m_arg1_xpr;
typename Arg2XprType::Nested m_arg2_xpr;
typename Arg3XprType::Nested m_arg3_xpr;
const TernaryOp m_functor;
};
namespace internal {
template<typename IfXprType, typename ThenXprType, typename ElseXprType>
struct traits<TensorSelectOp<IfXprType, ThenXprType, ElseXprType> >
: traits<ThenXprType>
{
typedef typename traits<ThenXprType>::Scalar Scalar;
typedef traits<ThenXprType> XprTraits;
typedef typename promote_storage_type<typename traits<ThenXprType>::StorageKind,
typename traits<ElseXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<ElseXprType>::Index,
typename traits<ThenXprType>::Index>::type Index;
typedef typename IfXprType::Nested IfNested;
typedef typename ThenXprType::Nested ThenNested;
typedef typename ElseXprType::Nested ElseNested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template<typename IfXprType, typename ThenXprType, typename ElseXprType>
struct eval<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, Eigen::Dense>
{
typedef const TensorSelectOp<IfXprType, ThenXprType, ElseXprType>& type;
};
template<typename IfXprType, typename ThenXprType, typename ElseXprType>
struct nested<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, 1, typename eval<TensorSelectOp<IfXprType, ThenXprType, ElseXprType> >::type>
{
typedef TensorSelectOp<IfXprType, ThenXprType, ElseXprType> type;
};
} // end namespace internal
template<typename IfXprType, typename ThenXprType, typename ElseXprType>
class TensorSelectOp : public TensorBase<TensorSelectOp<IfXprType, ThenXprType, ElseXprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorSelectOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::promote_storage_type<typename ThenXprType::CoeffReturnType,
typename ElseXprType::CoeffReturnType>::ret CoeffReturnType;
typedef typename Eigen::internal::nested<TensorSelectOp>::type Nested;
typedef typename Eigen::internal::traits<TensorSelectOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorSelectOp>::Index Index;
EIGEN_DEVICE_FUNC
TensorSelectOp(const IfXprType& a_condition,
const ThenXprType& a_then,
const ElseXprType& a_else)
: m_condition(a_condition), m_then(a_then), m_else(a_else)
{ }
EIGEN_DEVICE_FUNC
const IfXprType& ifExpression() const { return m_condition; }
EIGEN_DEVICE_FUNC
const ThenXprType& thenExpression() const { return m_then; }
EIGEN_DEVICE_FUNC
const ElseXprType& elseExpression() const { return m_else; }
protected:
typename IfXprType::Nested m_condition;
typename ThenXprType::Nested m_then;
typename ElseXprType::Nested m_else;
};
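// Illustrative note (an assumption, not from the original file): a
// TensorSelectOp is normally created through TensorBase::select() on the
// condition tensor, e.g.
//   Eigen::Tensor<bool, 1> cond(5);
//   Eigen::Tensor<float, 1> t(5), f(5);
//   Eigen::Tensor<float, 1> r = cond.select(t, f);  // r[i] = cond[i] ? t[i] : f[i]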
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H
// File: abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Jianwei Cui <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H
// This code requires the ability to initialize arrays of constant
// values directly inside a class.
#if __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
namespace Eigen {
/** \class TensorFFT
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor FFT class.
*
* TODO:
  * Vectorize the Cooley-Tukey and the Bluestein algorithms
* Add support for multithreaded evaluation
* Improve the performance on GPU
*/
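//
// A usage sketch, assuming the TensorBase::fft() entry point (illustrative
// only, not part of this header):
//
//   Eigen::Tensor<float, 2> input(16, 16);
//   input.setRandom();
//   Eigen::array<int, 2> dims{{0, 1}};  // transform along both dimensions
//   Eigen::Tensor<std::complex<float>, 2> spectrum =
//       input.fft<Eigen::BothParts, Eigen::FFT_FORWARD>(dims);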
template <bool NeedUpgrade> struct MakeComplex {
template <typename T>
EIGEN_DEVICE_FUNC
T operator() (const T& val) const { return val; }
};
template <> struct MakeComplex<true> {
template <typename T>
EIGEN_DEVICE_FUNC
std::complex<T> operator() (const T& val) const { return std::complex<T>(val, 0); }
};
template <> struct MakeComplex<false> {
template <typename T>
EIGEN_DEVICE_FUNC
std::complex<T> operator() (const std::complex<T>& val) const { return val; }
};
template <int ResultType> struct PartOf {
template <typename T> T operator() (const T& val) const { return val; }
};
template <> struct PartOf<RealPart> {
template <typename T> T operator() (const std::complex<T>& val) const { return val.real(); }
};
template <> struct PartOf<ImagPart> {
template <typename T> T operator() (const std::complex<T>& val) const { return val.imag(); }
};
namespace internal {
template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
struct traits<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir> > : public traits<XprType> {
typedef traits<XprType> XprTraits;
typedef typename NumTraits<typename XprTraits::Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
typedef typename XprTraits::Scalar InputScalar;
typedef typename conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
struct eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, Eigen::Dense> {
typedef const TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>& type;
};
template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
struct nested<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, 1, typename eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> >::type> {
typedef TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> type;
};
} // end namespace internal
template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
class TensorFFTOp : public TensorBase<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir>, ReadOnlyAccessors> {
public:
typedef typename Eigen::internal::traits<TensorFFTOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
typedef OutputScalar CoeffReturnType;
typedef typename Eigen::internal::nested<TensorFFTOp>::type Nested;
typedef typename Eigen::internal::traits<TensorFFTOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorFFTOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft)
: m_xpr(expr), m_fft(fft) {}
EIGEN_DEVICE_FUNC
const FFT& fft() const { return m_fft; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type& expression() const {
return m_xpr;
}
protected:
typename XprType::Nested m_xpr;
const FFT m_fft;
};
// Eval as rvalue
template <typename FFT, typename ArgType, typename Device, int FFTResultType, int FFTDir>
struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, Device> {
typedef TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir> XprType;
typedef typename XprType::Index Index;
static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
typedef internal::traits<XprType> XprTraits;
typedef typename XprTraits::Scalar InputScalar;
typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
typedef OutputScalar CoeffReturnType;
typedef typename PacketType<OutputScalar, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = true,
BlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) {
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
for (int i = 0; i < NumDims; ++i) {
eigen_assert(input_dims[i] > 0);
m_dimensions[i] = input_dims[i];
}
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
}
} else {
m_strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
}
}
m_size = m_dimensions.TotalSize();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
return m_dimensions;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(OutputScalar* data) {
m_impl.evalSubExprsIfNeeded(NULL);
if (data) {
evalToBuf(data);
return false;
} else {
m_data = (CoeffReturnType*)m_device.allocate(sizeof(CoeffReturnType) * m_size);
evalToBuf(m_data);
return true;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
if (m_data) {
m_device.deallocate(m_data);
m_data = NULL;
}
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const {
return m_data[index];
}
template <int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType
packet(Index index) const {
return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return m_data; }
private:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalToBuf(OutputScalar* data) {
const bool write_to_out = internal::is_same<OutputScalar, ComplexScalar>::value;
ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size);
for (Index i = 0; i < m_size; ++i) {
buf[i] = MakeComplex<internal::is_same<InputScalar, RealScalar>::value>()(m_impl.coeff(i));
}
for (size_t i = 0; i < m_fft.size(); ++i) {
Index dim = m_fft[i];
eigen_assert(dim >= 0 && dim < NumDims);
Index line_len = m_dimensions[dim];
eigen_assert(line_len >= 1);
ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len);
const bool is_power_of_two = isPowerOfTwo(line_len);
const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len);
const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite);
ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1));
if (!is_power_of_two) {
// Compute twiddle factors
// t_n = exp(sqrt(-1) * pi * n^2 / line_len)
// for n = 0, 1,..., line_len-1.
// For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2
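// (The recurrence follows from the exponent identity
// n^2 = 2*(n-1)^2 - (n-2)^2 + 2.)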
pos_j_base_powered[0] = ComplexScalar(1, 0);
if (line_len > 1) {
const RealScalar pi_over_len(EIGEN_PI / line_len);
const ComplexScalar pos_j_base = ComplexScalar(
std::cos(pi_over_len), std::sin(pi_over_len));
pos_j_base_powered[1] = pos_j_base;
if (line_len > 2) {
const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base;
for (int j = 2; j < line_len + 1; ++j) {
pos_j_base_powered[j] = pos_j_base_powered[j - 1] *
pos_j_base_powered[j - 1] /
pos_j_base_powered[j - 2] * pos_j_base_sq;
}
}
}
}
for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) {
const Index base_offset = getBaseOffsetFromIndex(partial_index, dim);
// get data into line_buf
const Index stride = m_strides[dim];
if (stride == 1) {
memcpy(line_buf, &buf[base_offset], line_len*sizeof(ComplexScalar));
} else {
Index offset = base_offset;
for (int j = 0; j < line_len; ++j, offset += stride) {
line_buf[j] = buf[offset];
}
}
// process the line
if (is_power_of_two) {
processDataLineCooleyTukey(line_buf, line_len, log_len);
}
else {
processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered);
}
// write back
if (FFTDir == FFT_FORWARD && stride == 1) {
memcpy(&buf[base_offset], line_buf, line_len*sizeof(ComplexScalar));
} else {
Index offset = base_offset;
const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0);
for (int j = 0; j < line_len; ++j, offset += stride) {
buf[offset] = (FFTDir == FFT_FORWARD) ? line_buf[j] : line_buf[j] * div_factor;
}
}
}
m_device.deallocate(line_buf);
if (!is_power_of_two) {
m_device.deallocate(a);
m_device.deallocate(b);
m_device.deallocate(pos_j_base_powered);
}
}
if(!write_to_out) {
for (Index i = 0; i < m_size; ++i) {
data[i] = PartOf<FFTResultType>()(buf[i]);
}
m_device.deallocate(buf);
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) {
eigen_assert(x > 0);
return !(x & (x - 1));
}
// Returns the smallest power of two >= 2 * n - 1, used as the padding length in Bluestein's FFT algorithm
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) {
Index i = 2;
while (i < 2 * n - 1) i *= 2;
return i;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) {
Index log2m = 0;
while (m >>= 1) log2m++;
return log2m;
}
// Calls the Cooley-Tukey algorithm directly; the data length must be a power of two
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) {
eigen_assert(isPowerOfTwo(line_len));
scramble_FFT(line_buf, line_len);
compute_1D_Butterfly<FFTDir>(line_buf, line_len, log_len);
}
// Calls Bluestein's FFT algorithm; m is the power-of-two padding length, at least (2 * n - 1)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) {
Index n = line_len;
Index m = good_composite;
ComplexScalar* data = line_buf;
for (Index i = 0; i < n; ++i) {
if(FFTDir == FFT_FORWARD) {
a[i] = data[i] * numext::conj(pos_j_base_powered[i]);
}
else {
a[i] = data[i] * pos_j_base_powered[i];
}
}
for (Index i = n; i < m; ++i) {
a[i] = ComplexScalar(0, 0);
}
for (Index i = 0; i < n; ++i) {
if(FFTDir == FFT_FORWARD) {
b[i] = pos_j_base_powered[i];
}
else {
b[i] = numext::conj(pos_j_base_powered[i]);
}
}
for (Index i = n; i < m - n; ++i) {
b[i] = ComplexScalar(0, 0);
}
for (Index i = m - n; i < m; ++i) {
if(FFTDir == FFT_FORWARD) {
b[i] = pos_j_base_powered[m-i];
}
else {
b[i] = numext::conj(pos_j_base_powered[m-i]);
}
}
scramble_FFT(a, m);
compute_1D_Butterfly<FFT_FORWARD>(a, m, log_len);
scramble_FFT(b, m);
compute_1D_Butterfly<FFT_FORWARD>(b, m, log_len);
for (Index i = 0; i < m; ++i) {
a[i] *= b[i];
}
scramble_FFT(a, m);
compute_1D_Butterfly<FFT_REVERSE>(a, m, log_len);
//Do the scaling after ifft
for (Index i = 0; i < m; ++i) {
a[i] /= m;
}
for (Index i = 0; i < n; ++i) {
if(FFTDir == FFT_FORWARD) {
data[i] = a[i] * numext::conj(pos_j_base_powered[i]);
}
else {
data[i] = a[i] * pos_j_base_powered[i];
}
}
}
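// In-place bit-reversal permutation of a length-n array (n must be a power of
// two); it reorders the input so that the iterative butterfly stages in
// compute_1D_Butterfly can operate on contiguous halves.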
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) {
eigen_assert(isPowerOfTwo(n));
Index j = 1;
for (Index i = 1; i < n; ++i){
if (j > i) {
std::swap(data[j-1], data[i-1]);
}
Index m = n >> 1;
while (m >= 2 && j > m) {
j -= m;
m >>= 1;
}
j += m;
}
}
template <int Dir>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_2(ComplexScalar* data) {
ComplexScalar tmp = data[1];
data[1] = data[0] - data[1];
data[0] += tmp;
}
template <int Dir>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_4(ComplexScalar* data) {
ComplexScalar tmp[4];
tmp[0] = data[0] + data[1];
tmp[1] = data[0] - data[1];
tmp[2] = data[2] + data[3];
if (Dir == FFT_FORWARD) {
tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]);
} else {
tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]);
}
data[0] = tmp[0] + tmp[2];
data[1] = tmp[1] + tmp[3];
data[2] = tmp[0] - tmp[2];
data[3] = tmp[1] - tmp[3];
}
template <int Dir>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_8(ComplexScalar* data) {
ComplexScalar tmp_1[8];
ComplexScalar tmp_2[8];
tmp_1[0] = data[0] + data[1];
tmp_1[1] = data[0] - data[1];
tmp_1[2] = data[2] + data[3];
if (Dir == FFT_FORWARD) {
tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1);
} else {
tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1);
}
tmp_1[4] = data[4] + data[5];
tmp_1[5] = data[4] - data[5];
tmp_1[6] = data[6] + data[7];
if (Dir == FFT_FORWARD) {
tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1);
} else {
tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1);
}
tmp_2[0] = tmp_1[0] + tmp_1[2];
tmp_2[1] = tmp_1[1] + tmp_1[3];
tmp_2[2] = tmp_1[0] - tmp_1[2];
tmp_2[3] = tmp_1[1] - tmp_1[3];
tmp_2[4] = tmp_1[4] + tmp_1[6];
// SQRT2DIV2 = sqrt(2)/2
#define SQRT2DIV2 0.7071067811865476
if (Dir == FFT_FORWARD) {
tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2);
tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1);
tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2);
} else {
tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2);
tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1);
tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2);
}
data[0] = tmp_2[0] + tmp_2[4];
data[1] = tmp_2[1] + tmp_2[5];
data[2] = tmp_2[2] + tmp_2[6];
data[3] = tmp_2[3] + tmp_2[7];
data[4] = tmp_2[0] - tmp_2[4];
data[5] = tmp_2[1] - tmp_2[5];
data[6] = tmp_2[2] - tmp_2[6];
data[7] = tmp_2[3] - tmp_2[7];
}
template <int Dir>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_1D_merge(
ComplexScalar* data, Index n, Index n_power_of_2) {
// Original code:
// RealScalar wtemp = std::sin(M_PI/n);
// RealScalar wpi = -std::sin(2 * M_PI/n);
const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2];
const RealScalar wpi = (Dir == FFT_FORWARD)
? m_minus_sin_2_PI_div_n_LUT[n_power_of_2]
: -m_minus_sin_2_PI_div_n_LUT[n_power_of_2];
const ComplexScalar wp(wtemp, wpi);
const ComplexScalar wp_one = wp + ComplexScalar(1, 0);
const ComplexScalar wp_one_2 = wp_one * wp_one;
const ComplexScalar wp_one_3 = wp_one_2 * wp_one;
const ComplexScalar wp_one_4 = wp_one_3 * wp_one;
const Index n2 = n / 2;
ComplexScalar w(1.0, 0.0);
for (Index i = 0; i < n2; i += 4) {
ComplexScalar temp0(data[i + n2] * w);
ComplexScalar temp1(data[i + 1 + n2] * w * wp_one);
ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2);
ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3);
w = w * wp_one_4;
data[i + n2] = data[i] - temp0;
data[i] += temp0;
data[i + 1 + n2] = data[i + 1] - temp1;
data[i + 1] += temp1;
data[i + 2 + n2] = data[i + 2] - temp2;
data[i + 2] += temp2;
data[i + 3 + n2] = data[i + 3] - temp3;
data[i + 3] += temp3;
}
}
template <int Dir>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly(
ComplexScalar* data, Index n, Index n_power_of_2) {
eigen_assert(isPowerOfTwo(n));
if (n > 8) {
compute_1D_Butterfly<Dir>(data, n / 2, n_power_of_2 - 1);
compute_1D_Butterfly<Dir>(data + n / 2, n / 2, n_power_of_2 - 1);
butterfly_1D_merge<Dir>(data, n, n_power_of_2);
} else if (n == 8) {
butterfly_8<Dir>(data);
} else if (n == 4) {
butterfly_4<Dir>(data);
} else if (n == 2) {
butterfly_2<Dir>(data);
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const {
Index result = 0;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > omitted_dim; --i) {
const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
const Index idx = index / partial_m_stride;
index -= idx * partial_m_stride;
result += idx * m_strides[i];
}
result += index;
}
else {
for (Index i = 0; i < omitted_dim; ++i) {
const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
const Index idx = index / partial_m_stride;
index -= idx * partial_m_stride;
result += idx * m_strides[i];
}
result += index;
}
// The value of index_coords[omitted_dim] is not determined at this step
return result;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const {
Index result = base + offset * m_strides[omitted_dim] ;
return result;
}
protected:
Index m_size;
const FFT& m_fft;
Dimensions m_dimensions;
array<Index, NumDims> m_strides;
TensorEvaluator<ArgType, Device> m_impl;
CoeffReturnType* m_data;
const Device& m_device;
// This will support a maximum FFT size of 2^32 for each dimension
// m_sin_PI_div_n_LUT[i] = -2 * std::pow(std::sin(M_PI / std::pow(2,i)), 2);
const RealScalar m_sin_PI_div_n_LUT[32] = {
RealScalar(0.0),
RealScalar(-2),
RealScalar(-0.999999999999999),
RealScalar(-0.292893218813453),
RealScalar(-0.0761204674887130),
RealScalar(-0.0192147195967696),
RealScalar(-0.00481527332780311),
RealScalar(-0.00120454379482761),
RealScalar(-3.01181303795779e-04),
RealScalar(-7.52981608554592e-05),
RealScalar(-1.88247173988574e-05),
RealScalar(-4.70619042382852e-06),
RealScalar(-1.17654829809007e-06),
RealScalar(-2.94137117780840e-07),
RealScalar(-7.35342821488550e-08),
RealScalar(-1.83835707061916e-08),
RealScalar(-4.59589268710903e-09),
RealScalar(-1.14897317243732e-09),
RealScalar(-2.87243293150586e-10),
RealScalar(-7.18108232902250e-11),
RealScalar(-1.79527058227174e-11),
RealScalar(-4.48817645568941e-12),
RealScalar(-1.12204411392298e-12),
RealScalar(-2.80511028480785e-13),
RealScalar(-7.01277571201985e-14),
RealScalar(-1.75319392800498e-14),
RealScalar(-4.38298482001247e-15),
RealScalar(-1.09574620500312e-15),
RealScalar(-2.73936551250781e-16),
RealScalar(-6.84841378126949e-17),
RealScalar(-1.71210344531737e-17),
RealScalar(-4.28025861329343e-18)
};
// m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2,i));
const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = {
RealScalar(0.0),
RealScalar(0.0),
RealScalar(-1.00000000000000e+00),
RealScalar(-7.07106781186547e-01),
RealScalar(-3.82683432365090e-01),
RealScalar(-1.95090322016128e-01),
RealScalar(-9.80171403295606e-02),
RealScalar(-4.90676743274180e-02),
RealScalar(-2.45412285229123e-02),
RealScalar(-1.22715382857199e-02),
RealScalar(-6.13588464915448e-03),
RealScalar(-3.06795676296598e-03),
RealScalar(-1.53398018628477e-03),
RealScalar(-7.66990318742704e-04),
RealScalar(-3.83495187571396e-04),
RealScalar(-1.91747597310703e-04),
RealScalar(-9.58737990959773e-05),
RealScalar(-4.79368996030669e-05),
RealScalar(-2.39684498084182e-05),
RealScalar(-1.19842249050697e-05),
RealScalar(-5.99211245264243e-06),
RealScalar(-2.99605622633466e-06),
RealScalar(-1.49802811316901e-06),
RealScalar(-7.49014056584716e-07),
RealScalar(-3.74507028292384e-07),
RealScalar(-1.87253514146195e-07),
RealScalar(-9.36267570730981e-08),
RealScalar(-4.68133785365491e-08),
RealScalar(-2.34066892682746e-08),
RealScalar(-1.17033446341373e-08),
RealScalar(-5.85167231706864e-09),
RealScalar(-2.92583615853432e-09)
};
};
} // end namespace Eigen
#endif // __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H
// File: abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
namespace Eigen {
/** \class TensorFixedSize
* \ingroup CXX11_Tensor_Module
*
* \brief The fixed sized version of the tensor class.
*
* The fixed sized equivalent of
* Eigen::Tensor<float, 3> t(3, 5, 7);
* is
  * Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
*/
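//
// A brief usage sketch (assuming Eigen::Sizes from TensorDimensions.h):
//   Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7>> t;
//   t.setZero();        // storage is inline, so no heap allocation occurs
//   t(1, 2, 3) = 4.0f;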
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
public:
typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
typedef typename Eigen::internal::nested<Self>::type Nested;
typedef typename internal::traits<Self>::StorageKind StorageKind;
typedef typename internal::traits<Self>::Index Index;
typedef Scalar_ Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Base::CoeffReturnType CoeffReturnType;
static const int Options = Options_;
enum {
IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
Layout = Options_ & RowMajor ? RowMajor : ColMajor,
CoordAccess = true,
RawAccess = true
};
typedef Dimensions_ Dimensions;
static const std::size_t NumIndices = Dimensions::count;
protected:
TensorStorage<Scalar, Dimensions, Options> m_storage;
public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
// This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
// work, because that uses base().coeffRef() - and we don't yet
// implement a similar class hierarchy
inline Self& base() { return *this; }
inline const Self& base() const { return *this; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
{
eigen_internal_assert(index >= 0 && index < size());
return m_storage.data()[index];
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& coeff() const
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return m_storage.data()[0];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
{
eigen_internal_assert(checkIndexRange(indices));
return m_storage.data()[linearizedIndex(indices)];
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
eigen_internal_assert(index >= 0 && index < size());
return m_storage.data()[index];
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& coeffRef()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return m_storage.data()[0];
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
{
if (Options&RowMajor) {
const Index index = i1 + i0 * m_storage.dimensions()[1];
return m_storage.data()[index];
} else {
const Index index = i0 + i1 * m_storage.dimensions()[0];
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
{
if (Options&RowMajor) {
const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
{
if (Options&RowMajor) {
const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
if (Options&RowMajor) {
const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
return m_storage.data()[index];
}
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
eigen_assert(checkIndexRange(indices));
return coeff(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
{
eigen_internal_assert(index >= 0 && index < size());
return coeff(index);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()() const
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff();
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
{
// The bracket operator is only for vectors, use the parenthesis operator instead.
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeff(index);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
{
// The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
{
if (Options&RowMajor) {
const Index index = i1 + i0 * m_storage.dimensions()[1];
return m_storage.data()[index];
} else {
const Index index = i0 + i1 * m_storage.dimensions()[0];
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
{
if (Options&RowMajor) {
const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
{
if (Options&RowMajor) {
const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
return m_storage.data()[index];
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
if (Options&RowMajor) {
const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
return m_storage.data()[index];
} else {
const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
return m_storage.data()[index];
}
}
#endif
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
{
eigen_assert(checkIndexRange(indices));
return coeffRef(indices);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index index)
{
eigen_assert(index >= 0 && index < size());
return coeffRef(index);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
return coeffRef();
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator[](Index index)
{
// The bracket operator is only for vectors, use the parenthesis operator instead
EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
return coeffRef(index);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize()
: m_storage()
{
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
: m_storage(other.m_storage)
{
}
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
: m_storage(other.m_storage)
{
}
#endif
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
{
typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
Assign assign(*this, other.derived());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
{
typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
Assign assign(*this, other.derived());
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
{
// FIXME: check that the dimensions of other match the dimensions of *this.
// Unfortunately this isn't possible yet when the rhs is an expression.
typedef TensorAssignOp<Self, const TensorFixedSize> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
{
// FIXME: check that the dimensions of other match the dimensions of *this.
// Unfortunately this isn't possible yet when the rhs is an expression.
typedef TensorAssignOp<Self, const OtherDerived> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
return *this;
}
protected:
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
{
using internal::array_apply_and_reduce;
using internal::array_zip_and_reduce;
using internal::greater_equal_zero_op;
using internal::logical_and_op;
using internal::lesser_op;
return true;
// check whether the indices are all >= 0
/* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
// check whether the indices fit in the dimensions
array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
{
if (Options&RowMajor) {
return m_storage.dimensions().IndexOfRowMajor(indices);
} else {
return m_storage.dimensions().IndexOfColMajor(indices);
}
}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
// File: abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
namespace Eigen {
/** \class TensorForcedEval
* \ingroup CXX11_Tensor_Module
*
  * \brief Tensor forced evaluation class.
  *
  * Forces its argument expression to be evaluated into a temporary buffer,
  * which downstream expressions then read from.
*/
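//
// A usage sketch (illustrative, not part of this header): forced evaluation
// is typically triggered via TensorBase::eval(), which materialises an
// intermediate result once so that it can be read several times cheaply:
//
//   Eigen::Tensor<float, 1> a(100), b(100);
//   a.setRandom(); b.setRandom();
//   auto tmp = (a + b).eval();              // evaluated into a buffer here
//   Eigen::Tensor<float, 1> r = tmp * tmp;  // reuses the buffered values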
/// template <class> class MakePointer_ is added to convert the host pointer to the device pointer.
/// It is needed because T* is not allowed by our device compiler.
/// To keep using the same Evaluator functions, that type has to be converted to our pointer type T.
/// This is done through the MakePointer_ class. By default, the Type in MakePointer_<T> is T*.
/// Since its default value is T*, adding this parameter does not break any existing code.
namespace internal {
template<typename XprType, template <class> class MakePointer_>
struct traits<TensorForcedEvalOp<XprType, MakePointer_> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind;
typedef typename traits<XprType>::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum {
Flags = 0
};
template <class T> struct MakePointer {
// Intermediate typedef to workaround MSVC issue.
typedef MakePointer_<T> MakePointerT;
typedef typename MakePointerT::Type Type;
};
};
template<typename XprType, template <class> class MakePointer_>
struct eval<TensorForcedEvalOp<XprType, MakePointer_>, Eigen::Dense>
{
typedef const TensorForcedEvalOp<XprType, MakePointer_>& type;
};
template<typename XprType, template <class> class MakePointer_>
struct nested<TensorForcedEvalOp<XprType, MakePointer_>, 1, typename eval<TensorForcedEvalOp<XprType, MakePointer_> >::type>
{
typedef TensorForcedEvalOp<XprType, MakePointer_> type;
};
} // end namespace internal
template<typename XprType, template <class> class MakePointer_>
class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType, MakePointer_>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr)
: m_xpr(expr) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
};
template<typename ArgType, typename Device, template <class> class MakePointer_>
struct TensorEvaluator<const TensorForcedEvalOp<ArgType, MakePointer_>, Device>
{
typedef TensorForcedEvalOp<ArgType, MakePointer_> XprType;
typedef typename ArgType::Scalar Scalar;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = true,
PacketAccess = (PacketSize > 1),
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = true
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
/// m_op is used for sycl
: m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
{ }
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
const Index numValues = internal::array_prod(m_impl.dimensions());
m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
// Should initialize the memory in case we're dealing with non-POD types.
if (NumTraits<CoeffReturnType>::RequireInitialization) {
for (Index i = 0; i < numValues; ++i) {
new(m_buffer+i) CoeffReturnType();
}
}
typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo;
EvalTo evalToTmp(m_buffer, m_op);
const bool PacketAccess = internal::IsVectorizable<Device, const ArgType>::value;
internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, PacketAccess>::run(evalToTmp, m_device);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_device.deallocate(m_buffer);
m_buffer = NULL;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_buffer[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC typename MakePointer<Scalar>::Type data() const { return m_buffer; }
/// required by sycl in order to extract the sycl accessor
const TensorEvaluator<ArgType, Device>& impl() { return m_impl; }
/// used by sycl in order to build the sycl buffer
const Device& device() const{return m_device;}
private:
TensorEvaluator<ArgType, Device> m_impl;
const ArgType m_op;
const Device& m_device;
typename MakePointer<CoeffReturnType>::Type m_buffer;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
// File: abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
namespace Eigen {
// The MakePointer class is used as a container for the address space of the
// pointer on the host and on the device. On the host side it yields the plain
// T* pointer; when EIGEN_USE_SYCL is defined, a specialisation of MakePointer
// constructs a buffer with a map_allocator over T* m_data on the host, for
// use on the device.
template<typename T> struct MakePointer {
typedef T* Type;
};
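// For example, in a default (non-SYCL) build MakePointer<float>::Type is just
// float*; a hypothetical compile-time check could read:
//   static_assert(std::is_same<MakePointer<float>::Type, float*>::value,
//                 "default MakePointer maps T to T*");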
template<typename PlainObjectType, int Options_ = Unaligned, template <class> class MakePointer_ = MakePointer> class TensorMap;
template<typename Scalar_, int NumIndices_, int Options_ = 0, typename IndexType = DenseIndex> class Tensor;
template<typename Scalar_, typename Dimensions, int Options_ = 0, typename IndexType = DenseIndex> class TensorFixedSize;
template<typename PlainObjectType> class TensorRef;
template<typename Derived, int AccessLevel> class TensorBase;
template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp;
template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp;
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp;
template<typename TargetType, typename XprType> class TensorConversionOp;
template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp;
template<typename PatchDim, typename XprType> class TensorPatchOp;
template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp;
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorVolumePatchOp;
template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
template<DenseIndex DimId, typename XprType> class TensorChippingOp;
template<typename NewDimensions, typename XprType> class TensorReshapingOp;
template<typename XprType> class TensorLayoutSwapOp;
template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
template<typename ReverseDimensions, typename XprType> class TensorReverseOp;
template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
template<typename Shuffle, typename XprType> class TensorShufflingOp;
template<typename Strides, typename XprType> class TensorStridingOp;
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType> class TensorStridingSlicingOp;
template<typename Strides, typename XprType> class TensorInflationOp;
template<typename Generator, typename XprType> class TensorGeneratorOp;
template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
template<typename Op, typename XprType> class TensorScanOp;
template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp;
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp;
template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorEvalToOp;
template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorForcedEvalOp;
template<typename ExpressionType, typename DeviceType> class TensorDevice;
template<typename Derived, typename Device> struct TensorEvaluator;
struct DefaultDevice;
struct ThreadPoolDevice;
struct GpuDevice;
struct SyclDevice;
enum FFTResultType {
RealPart = 0,
ImagPart = 1,
BothParts = 2
};
enum FFTDirection {
FFT_FORWARD = 0,
FFT_REVERSE = 1
};
namespace internal {
template <typename Device, typename Expression>
struct IsVectorizable {
static const bool value = TensorEvaluator<Expression, Device>::PacketAccess;
};
template <typename Expression>
struct IsVectorizable<GpuDevice, Expression> {
static const bool value = TensorEvaluator<Expression, GpuDevice>::PacketAccess &&
TensorEvaluator<Expression, GpuDevice>::IsAligned;
};
template <typename Expression, typename Device,
bool Vectorizable = IsVectorizable<Device, Expression>::value>
class TensorExecutor;
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
| 5,412 | 48.209091 | 131 |
h
|
abess
|
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
namespace Eigen {
namespace internal {
/** \internal
* \brief Template functor to compute the modulo between an array and a scalar.
*/
template <typename Scalar>
struct scalar_mod_op {
EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {}
EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a % m_divisor; }
const Scalar m_divisor;
};
template <typename Scalar>
struct functor_traits<scalar_mod_op<Scalar> >
{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
/** \internal
* \brief Template functor to compute the modulo between 2 arrays.
*/
template <typename Scalar>
struct scalar_mod2_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op);
EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; }
};
template <typename Scalar>
struct functor_traits<scalar_mod2_op<Scalar> >
{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
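// A minimal usage sketch (not part of Eigen): both modulo functors can be
// invoked directly; Scalar must support operator% (i.e. an integer type).
//
//   Eigen::internal::scalar_mod_op<int> mod7(7);
//   int r1 = mod7(23);                        // 2
//
//   Eigen::internal::scalar_mod2_op<int> mod;
//   int r2 = mod(23, 7);                      // 2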
template <typename Scalar>
struct scalar_fmod_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
operator()(const Scalar& a, const Scalar& b) const {
return numext::fmod(a, b);
}
};
template <typename Scalar>
struct functor_traits<scalar_fmod_op<Scalar> > {
enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell.
PacketAccess = false };
};
/** \internal
* \brief Template functor to compute the sigmoid of a scalar
* \sa class CwiseUnaryOp, ArrayBase::sigmoid()
*/
template <typename T>
struct scalar_sigmoid_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_op)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const {
const T one = T(1);
return one / (one + numext::exp(-x));
}
template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Packet packetOp(const Packet& x) const {
const Packet one = pset1<Packet>(T(1));
return pdiv(one, padd(one, pexp(pnegate(x))));
}
};
template <typename T>
struct functor_traits<scalar_sigmoid_op<T> > {
enum {
Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 6,
PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasDiv &&
packet_traits<T>::HasNegate && packet_traits<T>::HasExp
};
};
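// Usage sketch (assumption: this functor is normally reached through the
// tensor expression API, e.g. TensorBase::sigmoid(), but it can also be
// called directly):
//
//   Eigen::internal::scalar_sigmoid_op<float> sig;
//   float y = sig(0.0f);                      // 0.5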
template<typename Reducer, typename Device>
struct reducer_traits {
enum {
Cost = 1,
PacketAccess = false
};
};
// Standard reduction functors
template <typename T> struct SumReducer
{
static const bool PacketAccess = packet_traits<T>::HasAdd;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
internal::scalar_sum_op<T> sum_op;
*accum = sum_op(*accum, t);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = padd<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
internal::scalar_cast_op<int, T> conv;
return conv(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
return vaccum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
internal::scalar_sum_op<T> sum_op;
return sum_op(saccum, predux(vaccum));
}
};
template <typename T, typename Device>
struct reducer_traits<SumReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = PacketType<T, Device>::HasAdd
};
};
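// Sketch of the reducer protocol shared by all reducers below: initialize an
// accumulator, fold values in with reduce(), then finalize():
//
//   Eigen::internal::SumReducer<float> sum;
//   float accum = sum.initialize();           // 0
//   sum.reduce(1.0f, &accum);
//   sum.reduce(2.0f, &accum);
//   float total = sum.finalize(accum);        // 3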
template <typename T> struct MeanReducer
{
static const bool PacketAccess = packet_traits<T>::HasAdd && !NumTraits<T>::IsInteger;
static const bool IsStateful = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
MeanReducer() : scalarCount_(0), packetCount_(0) { }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
internal::scalar_sum_op<T> sum_op;
*accum = sum_op(*accum, t);
scalarCount_++;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) {
(*accum) = padd<Packet>(*accum, p);
packetCount_++;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
internal::scalar_cast_op<int, T> conv;
return conv(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum / scalarCount_;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
return pdiv(vaccum, pset1<Packet>(packetCount_));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
internal::scalar_sum_op<T> sum_op;
return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
}
protected:
DenseIndex scalarCount_;
DenseIndex packetCount_;
};
template <typename T, typename Device>
struct reducer_traits<MeanReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = PacketType<T, Device>::HasAdd
};
};
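// MeanReducer is stateful: it counts how many values were folded in and
// divides by that count in finalize(). A short sketch:
//
//   Eigen::internal::MeanReducer<float> mean;
//   float accum = mean.initialize();          // 0
//   mean.reduce(2.0f, &accum);
//   mean.reduce(4.0f, &accum);
//   float avg = mean.finalize(accum);         // 3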
template <typename T, bool IsMax = true, bool IsInteger = true>
struct MinMaxBottomValue {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
return Eigen::NumTraits<T>::lowest();
}
};
template <typename T>
struct MinMaxBottomValue<T, true, false> {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
return -Eigen::NumTraits<T>::infinity();
}
};
template <typename T>
struct MinMaxBottomValue<T, false, true> {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
return Eigen::NumTraits<T>::highest();
}
};
template <typename T>
struct MinMaxBottomValue<T, false, false> {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
return Eigen::NumTraits<T>::infinity();
}
};
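// The four specializations pick the reduction identity: a max reduction
// starts from the lowest representable value (or -infinity for non-integer
// types), and symmetrically for min. For instance:
//
//   MinMaxBottomValue<int,   true,  true >::bottom_value();   // INT_MIN
//   MinMaxBottomValue<float, true,  false>::bottom_value();   // -inf
//   MinMaxBottomValue<int,   false, true >::bottom_value();   // INT_MAX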
template <typename T> struct MaxReducer
{
static const bool PacketAccess = packet_traits<T>::HasMax;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t > *accum) { *accum = t; }
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmax<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return MinMaxBottomValue<T, true, Eigen::NumTraits<T>::IsInteger>::bottom_value();
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
return vaccum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
return numext::maxi(saccum, predux_max(vaccum));
}
};
template <typename T, typename Device>
struct reducer_traits<MaxReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = PacketType<T, Device>::HasMax
};
};
template <typename T> struct MinReducer
{
static const bool PacketAccess = packet_traits<T>::HasMin;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t < *accum) { *accum = t; }
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmin<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return MinMaxBottomValue<T, false, Eigen::NumTraits<T>::IsInteger>::bottom_value();
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
return vaccum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
return numext::mini(saccum, predux_min(vaccum));
}
};
template <typename T, typename Device>
struct reducer_traits<MinReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = PacketType<T, Device>::HasMin
};
};
template <typename T> struct ProdReducer
{
static const bool PacketAccess = packet_traits<T>::HasMul;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
internal::scalar_product_op<T> prod_op;
(*accum) = prod_op(*accum, t);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmul<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
internal::scalar_cast_op<int, T> conv;
return conv(1);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(initialize());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
return vaccum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
internal::scalar_product_op<T> prod_op;
return prod_op(saccum, predux_mul(vaccum));
}
};
template <typename T, typename Device>
struct reducer_traits<ProdReducer<T>, Device> {
enum {
Cost = NumTraits<T>::MulCost,
PacketAccess = PacketType<T, Device>::HasMul
};
};
struct AndReducer
{
static const bool PacketAccess = false;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
*accum = *accum && t;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const {
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const {
return accum;
}
};
template <typename Device>
struct reducer_traits<AndReducer, Device> {
enum {
Cost = 1,
PacketAccess = false
};
};
struct OrReducer {
static const bool PacketAccess = false;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const {
*accum = *accum || t;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const {
return false;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const {
return accum;
}
};
template <typename Device>
struct reducer_traits<OrReducer, Device> {
enum {
Cost = 1,
PacketAccess = false
};
};
// Argmin/Argmax reducers
template <typename T> struct ArgMaxTupleReducer
{
static const bool PacketAccess = false;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t.second > accum->second) { *accum = t; }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return T(0, NumTraits<typename T::second_type>::lowest());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
return accum;
}
};
template <typename T, typename Device>
struct reducer_traits<ArgMaxTupleReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = false
};
};
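// These reducers fold (index, value) tuples; any pair-like type exposing
// first/second members and a second_type typedef works. A sketch using
// std::pair (an assumption for illustration, not necessarily the tuple type
// TensorTupleReducerOp uses internally):
//
//   typedef std::pair<Eigen::DenseIndex, float> Tup;
//   Eigen::internal::ArgMaxTupleReducer<Tup> argmax;
//   Tup best = argmax.initialize();           // (0, lowest<float>())
//   argmax.reduce(Tup(3, 1.5f), &best);
//   argmax.reduce(Tup(7, 4.0f), &best);       // best.first == 7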
template <typename T> struct ArgMinTupleReducer
{
static const bool PacketAccess = false;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const {
if (t.second < accum->second) { *accum = t; }
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return T(0, NumTraits<typename T::second_type>::highest());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
return accum;
}
};
template <typename T, typename Device>
struct reducer_traits<ArgMinTupleReducer<T>, Device> {
enum {
Cost = NumTraits<T>::AddCost,
PacketAccess = false
};
};
template <typename T, typename Index, size_t NumDims>
class GaussianGenerator {
public:
static const bool PacketAccess = false;
EIGEN_DEVICE_FUNC GaussianGenerator(const array<T, NumDims>& means,
const array<T, NumDims>& std_devs)
: m_means(means)
{
for (size_t i = 0; i < NumDims; ++i) {
m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2;
}
}
EIGEN_DEVICE_FUNC T operator()(const array<Index, NumDims>& coordinates) const {
T tmp = T(0);
for (size_t i = 0; i < NumDims; ++i) {
T offset = coordinates[i] - m_means[i];
tmp += offset * offset / m_two_sigmas[i];
}
return numext::exp(-tmp);
}
private:
array<T, NumDims> m_means;
array<T, NumDims> m_two_sigmas;
};
template <typename T, typename Index, size_t NumDims>
struct functor_traits<GaussianGenerator<T, Index, NumDims> > {
enum {
Cost = NumDims * (2 * NumTraits<T>::AddCost + NumTraits<T>::MulCost +
functor_traits<scalar_quotient_op<T, T> >::Cost) +
functor_traits<scalar_exp_op<T> >::Cost,
PacketAccess = GaussianGenerator<T, Index, NumDims>::PacketAccess
};
};
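// Usage sketch (assuming the generator is plugged in through
// TensorBase::generate()): an anisotropic Gaussian bump evaluated at every
// coordinate of the output tensor.
//
//   Eigen::Tensor<float, 2> t(5, 5);
//   Eigen::array<float, 2> means = {{2.f, 2.f}};
//   Eigen::array<float, 2> stddevs = {{1.f, 1.f}};
//   t = t.generate(
//       Eigen::internal::GaussianGenerator<float, Eigen::DenseIndex, 2>(means, stddevs));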
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
| 14625 | 28.84898 | 106 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
namespace Eigen {
/** \class TensorGenerator
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor generator class.
*
* Computes each coefficient of its result by invoking a user-supplied
* generator functor with the coordinates of that coefficient.
*/
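// A minimal usage sketch (assuming the op is created through
// TensorBase::generate()): the generator is handed the coordinates of each
// output coefficient and returns its value.
//
//   struct Iota {
//     float operator()(const Eigen::array<Eigen::DenseIndex, 2>& coords) const {
//       return static_cast<float>(10 * coords[0] + coords[1]);
//     }
//   };
//   Eigen::Tensor<float, 2> t(3, 4);
//   t = t.generate(Iota());                   // t(i, j) == 10*i + j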
namespace internal {
template<typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
{
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
};
template<typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
{
typedef const TensorGeneratorOp<Generator, XprType>& type;
};
template<typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
{
typedef TensorGeneratorOp<Generator, XprType> type;
};
} // end namespace internal
template<typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
: m_xpr(expr), m_generator(generator) {}
EIGEN_DEVICE_FUNC
const Generator& generator() const { return m_generator; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
const Generator m_generator;
};
// Eval as rvalue
template<typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
{
typedef TensorGeneratorOp<Generator, ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
static const int NumDims = internal::array_size<Dimensions>::value;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
IsAligned = false,
PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1),
BlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_generator(op.generator())
{
TensorEvaluator<ArgType, Device> impl(op.expression(), device);
m_dimensions = impl.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
}
} else {
m_strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
array<Index, NumDims> coords;
extract_coordinates(index, coords);
return m_generator(coords);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < dimensions().TotalSize());
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
for (int i = 0; i < packetSize; ++i) {
values[i] = coeff(index+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool) const {
// TODO(rmlarsen): This is just a placeholder. Define interface to make
// generators return their cost.
return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
TensorOpCost::MulCost<Scalar>());
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_strides[i];
index -= idx * m_strides[i];
coords[i] = idx;
}
coords[0] = index;
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_strides[i];
index -= idx * m_strides[i];
coords[i] = idx;
}
coords[NumDims-1] = index;
}
}
Dimensions m_dimensions;
array<Index, NumDims> m_strides;
Generator m_generator;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
| 6339 | 33.086022 | 116 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Eugene Brevdo <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
namespace Eigen {
/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) applied to the given tensors.
*
* This function computes the regularized incomplete beta function (integral).
*
*/
template <typename ADerived, typename BDerived, typename XDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const
TensorCwiseTernaryOp<internal::scalar_betainc_op<typename XDerived::Scalar>,
const ADerived, const BDerived, const XDerived>
betainc(const ADerived& a, const BDerived& b, const XDerived& x) {
return TensorCwiseTernaryOp<
internal::scalar_betainc_op<typename XDerived::Scalar>, const ADerived,
const BDerived, const XDerived>(
a, b, x, internal::scalar_betainc_op<typename XDerived::Scalar>());
}
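// Usage sketch: all three tensors must have identical shapes; the result is
// computed coefficient-wise.
//
//   Eigen::Tensor<float, 1> a(3), b(3), x(3);
//   a.setConstant(2.f); b.setConstant(3.f); x.setConstant(0.5f);
//   Eigen::Tensor<float, 1> r = Eigen::betainc(a, b, x);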
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
| 1316 | 37.735294 | 105 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H
#define EIGEN_CXX11_TENSOR_TENSOR_IO_H
namespace Eigen {
namespace internal {
// Print the tensor as a 2d matrix
template <typename Tensor, int Rank>
struct TensorPrinter {
static void run (std::ostream& os, const Tensor& tensor) {
typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
typedef typename Tensor::Index Index;
const Index total_size = internal::array_prod(tensor.dimensions());
if (total_size > 0) {
const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions());
static const int layout = Tensor::Layout;
Map<const Array<Scalar, Dynamic, Dynamic, layout> > matrix(const_cast<Scalar*>(tensor.data()), first_dim, total_size/first_dim);
os << matrix;
}
}
};
// Print the tensor as a vector
template <typename Tensor>
struct TensorPrinter<Tensor, 1> {
static void run (std::ostream& os, const Tensor& tensor) {
typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
typedef typename Tensor::Index Index;
const Index total_size = internal::array_prod(tensor.dimensions());
if (total_size > 0) {
Map<const Array<Scalar, Dynamic, 1> > array(const_cast<Scalar*>(tensor.data()), total_size);
os << array;
}
}
};
// Print the tensor as a scalar
template <typename Tensor>
struct TensorPrinter<Tensor, 0> {
static void run (std::ostream& os, const Tensor& tensor) {
os << tensor.coeff(0);
}
};
}
template <typename T>
std::ostream& operator << (std::ostream& os, const TensorBase<T, ReadOnlyAccessors>& expr) {
typedef TensorEvaluator<const TensorForcedEvalOp<const T>, DefaultDevice> Evaluator;
typedef typename Evaluator::Dimensions Dimensions;
// Evaluate the expression if needed
TensorForcedEvalOp<const T> eval = expr.eval();
Evaluator tensor(eval, DefaultDevice());
tensor.evalSubExprsIfNeeded(NULL);
// Print the result
static const int rank = internal::array_size<Dimensions>::value;
internal::TensorPrinter<Evaluator, rank>::run(os, tensor);
// Cleanup.
tensor.cleanup();
return os;
}
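// Usage sketch: a rank-N tensor is printed as a 2-d matrix whose row count is
// the tensor's first dimension and whose column count collapses all the
// remaining dimensions.
//
//   Eigen::Tensor<int, 3> t(2, 3, 4);
//   t.setZero();
//   std::cout << t << std::endl;              // printed as a 2 x 12 matrix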
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H
| 2560 | 31.0125 | 134 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
namespace Eigen {
/** \class TensorImagePatch
* \ingroup CXX11_Tensor_Module
*
* \brief Patch extraction specialized for image processing.
* This assumes that the input has at least 3 dimensions ordered as follows:
* 1st dimension: channels (of size d)
* 2nd dimension: rows (of size r)
* 3rd dimension: columns (of size c)
* There can be additional dimensions such as time (for video) or batch (for
* bulk processing) after the first 3.
* Calling the image patch code with patch_rows and patch_cols is equivalent
* to calling the regular patch extraction code with parameters d, patch_rows,
* patch_cols, and 1 for all the additional dimensions.
*/
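// Usage sketch (assuming the op is created through
// TensorBase::extract_image_patches()): for a ColMajor input of shape
// (depth, rows, cols, batch), the result has shape
// (depth, patch_rows, patch_cols, num_patches, batch).
//
//   Eigen::Tensor<float, 4> img(3, 128, 128, 16);
//   Eigen::Tensor<float, 5> patches = img.extract_image_patches(5, 5);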
namespace internal {
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
{
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
};
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense>
{
typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
};
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type>
{
typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};
} // end namespace internal
template<DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
{
public:
typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex row_strides, DenseIndex col_strides,
DenseIndex in_row_strides, DenseIndex in_col_strides,
DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
PaddingType padding_type, Scalar padding_value)
: m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
m_row_strides(row_strides), m_col_strides(col_strides),
m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
m_padding_type(padding_type), m_padding_value(padding_value) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex row_strides, DenseIndex col_strides,
DenseIndex in_row_strides, DenseIndex in_col_strides,
DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
DenseIndex padding_top, DenseIndex padding_bottom,
DenseIndex padding_left, DenseIndex padding_right,
Scalar padding_value)
: m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
m_row_strides(row_strides), m_col_strides(col_strides),
m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
m_padding_left(padding_left), m_padding_right(padding_right),
m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
EIGEN_DEVICE_FUNC
DenseIndex patch_rows() const { return m_patch_rows; }
EIGEN_DEVICE_FUNC
DenseIndex patch_cols() const { return m_patch_cols; }
EIGEN_DEVICE_FUNC
DenseIndex row_strides() const { return m_row_strides; }
EIGEN_DEVICE_FUNC
DenseIndex col_strides() const { return m_col_strides; }
EIGEN_DEVICE_FUNC
DenseIndex in_row_strides() const { return m_in_row_strides; }
EIGEN_DEVICE_FUNC
DenseIndex in_col_strides() const { return m_in_col_strides; }
EIGEN_DEVICE_FUNC
DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
EIGEN_DEVICE_FUNC
DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
EIGEN_DEVICE_FUNC
bool padding_explicit() const { return m_padding_explicit; }
EIGEN_DEVICE_FUNC
DenseIndex padding_top() const { return m_padding_top; }
EIGEN_DEVICE_FUNC
DenseIndex padding_bottom() const { return m_padding_bottom; }
EIGEN_DEVICE_FUNC
DenseIndex padding_left() const { return m_padding_left; }
EIGEN_DEVICE_FUNC
DenseIndex padding_right() const { return m_padding_right; }
EIGEN_DEVICE_FUNC
PaddingType padding_type() const { return m_padding_type; }
EIGEN_DEVICE_FUNC
Scalar padding_value() const { return m_padding_value; }
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
const DenseIndex m_patch_rows;
const DenseIndex m_patch_cols;
const DenseIndex m_row_strides;
const DenseIndex m_col_strides;
const DenseIndex m_in_row_strides;
const DenseIndex m_in_col_strides;
const DenseIndex m_row_inflate_strides;
const DenseIndex m_col_inflate_strides;
const bool m_padding_explicit;
const DenseIndex m_padding_top;
const DenseIndex m_padding_bottom;
const DenseIndex m_padding_left;
const DenseIndex m_padding_right;
const PaddingType m_padding_type;
const Scalar m_padding_value;
};
// Eval as rvalue
template<DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
{
typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
typedef typename XprType::Index Index;
static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
static const int NumDims = NumInputDims + 1;
typedef DSizes<Index, NumDims> Dimensions;
typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>,
Device> Self;
typedef TensorEvaluator<ArgType, Device> Impl;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
enum {
IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false,
RawAccess = false
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device)
{
EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);
m_paddingValue = op.padding_value();
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
// Caches a few variables.
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_inputDepth = input_dims[0];
m_inputRows = input_dims[1];
m_inputCols = input_dims[2];
} else {
m_inputDepth = input_dims[NumInputDims-1];
m_inputRows = input_dims[NumInputDims-2];
m_inputCols = input_dims[NumInputDims-3];
}
m_row_strides = op.row_strides();
m_col_strides = op.col_strides();
// Input strides and effective input/patch size
m_in_row_strides = op.in_row_strides();
m_in_col_strides = op.in_col_strides();
m_row_inflate_strides = op.row_inflate_strides();
m_col_inflate_strides = op.col_inflate_strides();
// The "effective" input rows and input cols are the input rows and cols
// after inflating them with zeros.
// For example, a 2x3 matrix with row_inflate_strides and
// col_inflate_strides of 2 comes from:
// A B C
// D E F
//
// into a 3 x 5 matrix:
//
// A . B . C
// . . . . .
// D . E . F
m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
if (op.padding_explicit()) {
m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
m_rowPaddingTop = op.padding_top();
m_colPaddingLeft = op.padding_left();
} else {
// Computing padding from the type
switch (op.padding_type()) {
case PADDING_VALID:
m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
// Calculate the padding
m_rowPaddingTop = numext::maxi<Index>(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2);
m_colPaddingLeft = numext::maxi<Index>(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2);
break;
case PADDING_SAME:
m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
// Calculate the padding
m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
break;
default:
eigen_assert(false && "unexpected padding");
}
}
eigen_assert(m_outputRows > 0);
eigen_assert(m_outputCols > 0);
// Dimensions for result of extraction.
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
// ColMajor
// 0: depth
// 1: patch_rows
// 2: patch_cols
// 3: number of patches
// 4 and beyond: anything else (such as batch).
m_dimensions[0] = input_dims[0];
m_dimensions[1] = op.patch_rows();
m_dimensions[2] = op.patch_cols();
m_dimensions[3] = m_outputRows * m_outputCols;
for (int i = 4; i < NumDims; ++i) {
m_dimensions[i] = input_dims[i-1];
}
} else {
// RowMajor
// NumDims-1: depth
// NumDims-2: patch_rows
// NumDims-3: patch_cols
// NumDims-4: number of patches
// NumDims-5 and beyond: anything else (such as batch).
m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
m_dimensions[NumDims-2] = op.patch_rows();
m_dimensions[NumDims-3] = op.patch_cols();
m_dimensions[NumDims-4] = m_outputRows * m_outputCols;
for (int i = NumDims-5; i >= 0; --i) {
m_dimensions[i] = input_dims[i];
}
}
// Strides for moving the patch in various dimensions.
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_colStride = m_dimensions[1];
m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
m_otherStride = m_patchStride * m_dimensions[3];
} else {
m_colStride = m_dimensions[NumDims-2];
m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1];
m_otherStride = m_patchStride * m_dimensions[NumDims-4];
}
// Strides for navigating through the input tensor.
m_rowInputStride = m_inputDepth;
m_colInputStride = m_inputDepth * m_inputRows;
m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;
// Fast representations of different variables.
m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
m_fastInflateRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
m_fastInflateColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
// Number of patches in the width dimension.
m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
} else {
m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
// Patch index corresponding to the passed in index.
const Index patchIndex = index / m_fastPatchStride;
// Find the offset of the element wrt the location of the first element.
const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;
// Other ways to index this element.
const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
// Calculate col index in the input original tensor.
const Index colIndex = patch2DIndex / m_fastOutputRows;
const Index colOffset = patchOffset / m_fastColStride;
const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
if (inputCol < 0 || inputCol >= m_input_cols_eff ||
((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
return Scalar(m_paddingValue);
}
// Calculate row index in the original input tensor.
const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
const Index rowOffset = patchOffset - colOffset * m_colStride;
const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0);
if (inputRow < 0 || inputRow >= m_input_rows_eff ||
((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
return Scalar(m_paddingValue);
}
const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
return m_impl.coeff(inputIndex);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
return packetWithPossibleZero(index);
}
const Index indices[2] = {index, index + PacketSize - 1};
const Index patchIndex = indices[0] / m_fastPatchStride;
if (patchIndex != indices[1] / m_fastPatchStride) {
return packetWithPossibleZero(index);
}
const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
eigen_assert(otherIndex == indices[1] / m_fastOtherStride);
// Find the offset of the element wrt the location of the first element.
const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
(indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};
const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);
const Index colIndex = patch2DIndex / m_fastOutputRows;
const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};
// Calculate col indices in the original input tensor.
const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] -
m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
}
if (inputCols[0] == inputCols[1]) {
const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
eigen_assert(rowOffsets[0] <= rowOffsets[1]);
// Calculate col indices in the original input tensor.
const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] -
m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};
if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
}
if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
// no padding
const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
return m_impl.template packet<Unaligned>(inputIndex);
}
}
return packetWithPossibleZero(index);
}
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
Index rowPaddingTop() const { return m_rowPaddingTop; }
Index colPaddingLeft() const { return m_colPaddingLeft; }
Index outputRows() const { return m_outputRows; }
Index outputCols() const { return m_outputCols; }
Index userRowStride() const { return m_row_strides; }
Index userColStride() const { return m_col_strides; }
Index userInRowStride() const { return m_in_row_strides; }
Index userInColStride() const { return m_in_col_strides; }
Index rowInflateStride() const { return m_row_inflate_strides; }
Index colInflateStride() const { return m_col_inflate_strides; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
// We conservatively estimate the cost for the code path where the computed
// index is inside the original image and
// TensorEvaluator<ArgType, Device>::CoordAccess is false.
const double compute_cost = 3 * TensorOpCost::DivCost<Index>() +
6 * TensorOpCost::MulCost<Index>() +
8 * TensorOpCost::MulCost<Index>();
return m_impl.costPerCoeff(vectorized) +
TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
}
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
{
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
for (int i = 0; i < PacketSize; ++i) {
values[i] = coeff(index+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
Dimensions m_dimensions;
Index m_otherStride;
Index m_patchStride;
Index m_colStride;
Index m_row_strides;
Index m_col_strides;
Index m_in_row_strides;
Index m_in_col_strides;
Index m_row_inflate_strides;
Index m_col_inflate_strides;
Index m_input_rows_eff;
Index m_input_cols_eff;
Index m_patch_rows_eff;
Index m_patch_cols_eff;
internal::TensorIntDivisor<Index> m_fastOtherStride;
internal::TensorIntDivisor<Index> m_fastPatchStride;
internal::TensorIntDivisor<Index> m_fastColStride;
internal::TensorIntDivisor<Index> m_fastInflateRowStride;
internal::TensorIntDivisor<Index> m_fastInflateColStride;
internal::TensorIntDivisor<Index> m_fastInputColsEff;
Index m_rowInputStride;
Index m_colInputStride;
Index m_patchInputStride;
Index m_inputDepth;
Index m_inputRows;
Index m_inputCols;
Index m_outputRows;
Index m_outputCols;
Index m_rowPaddingTop;
Index m_colPaddingLeft;
internal::TensorIntDivisor<Index> m_fastOutputRows;
internal::TensorIntDivisor<Index> m_fastOutputDepth;
Scalar m_paddingValue;
TensorEvaluator<ArgType, Device> m_impl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
| 23098 | 44.292157 | 156 | h |
| abess | abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
#define EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
#define EIGEN_HAS_INDEX_LIST
namespace Eigen {
/** \internal
*
* \class TensorIndexList
* \ingroup CXX11_Tensor_Module
*
* \brief Set of classes used to encode a set of Tensor dimensions/indices.
*
* The indices in the list can be known at compile time or at runtime. A mix
* of static and dynamic indices can also be provided if needed. The tensor
* code will attempt to take advantage of the indices that are known at
* compile time to optimize the code it generates.
*
* This functionality requires a C++11-compliant compiler. If your compiler
* is older, you need to use arrays of indices instead.
*
* Several examples are provided in the cxx11_tensor_index_list.cpp file.
*
* \sa Tensor
*/
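// A short sketch mixing a compile-time index with a runtime index (more in
// cxx11_tensor_index_list.cpp):
//
//   Eigen::IndexList<Eigen::type2index<0>, int> dims;
//   dims.set(1, 2);                           // second value set at runtime
//   // dims[0] == 0 and dims.value_known_statically(0) is true, so code
//   // paths depending on the first index can be resolved at compile time.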
template <DenseIndex n>
struct type2index {
static const DenseIndex value = n;
EIGEN_DEVICE_FUNC constexpr operator DenseIndex() const { return n; }
EIGEN_DEVICE_FUNC void set(DenseIndex val) {
eigen_assert(val == n);
}
};
// This can be used with IndexPairList to get compile-time constant pairs,
// such as IndexPairList<type2indexpair<1,2>, type2indexpair<3,4>>().
template <DenseIndex f, DenseIndex s>
struct type2indexpair {
static const DenseIndex first = f;
static const DenseIndex second = s;
constexpr EIGEN_DEVICE_FUNC operator IndexPair<DenseIndex>() const {
return IndexPair<DenseIndex>(f, s);
}
EIGEN_DEVICE_FUNC void set(const IndexPair<DenseIndex>& val) {
eigen_assert(val.first == f);
eigen_assert(val.second == s);
}
};
template<DenseIndex n> struct NumTraits<type2index<n> >
{
typedef DenseIndex Real;
enum {
IsComplex = 0,
RequireInitialization = false,
ReadCost = 1,
AddCost = 1,
MulCost = 1
};
EIGEN_DEVICE_FUNC static inline Real epsilon() { return 0; }
EIGEN_DEVICE_FUNC static inline Real dummy_precision() { return 0; }
EIGEN_DEVICE_FUNC static inline Real highest() { return n; }
EIGEN_DEVICE_FUNC static inline Real lowest() { return n; }
};
namespace internal {
template <typename T>
EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) {
val = new_val;
}
template <DenseIndex n>
EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, DenseIndex new_val) {
val.set(new_val);
}
template <typename T>
EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair<DenseIndex> new_val) {
val = new_val;
}
template <DenseIndex f, DenseIndex s>
EIGEN_DEVICE_FUNC void update_value(type2indexpair<f, s>& val, IndexPair<DenseIndex> new_val) {
val.set(new_val);
}
template <typename T>
struct is_compile_time_constant {
static constexpr bool value = false;
};
template <DenseIndex idx>
struct is_compile_time_constant<type2index<idx> > {
static constexpr bool value = true;
};
template <DenseIndex idx>
struct is_compile_time_constant<const type2index<idx> > {
static constexpr bool value = true;
};
template <DenseIndex idx>
struct is_compile_time_constant<type2index<idx>& > {
static constexpr bool value = true;
};
template <DenseIndex idx>
struct is_compile_time_constant<const type2index<idx>& > {
static constexpr bool value = true;
};
template <DenseIndex f, DenseIndex s>
struct is_compile_time_constant<type2indexpair<f, s> > {
static constexpr bool value = true;
};
template <DenseIndex f, DenseIndex s>
struct is_compile_time_constant<const type2indexpair<f, s> > {
static constexpr bool value = true;
};
template <DenseIndex f, DenseIndex s>
struct is_compile_time_constant<type2indexpair<f, s>& > {
static constexpr bool value = true;
};
template <DenseIndex f, DenseIndex s>
struct is_compile_time_constant<const type2indexpair<f, s>& > {
static constexpr bool value = true;
};
template<typename... T>
struct IndexTuple;
template<typename T, typename... O>
struct IndexTuple<T, O...> {
EIGEN_DEVICE_FUNC constexpr IndexTuple() : head(), others() { }
EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v, const O... o) : head(v), others(o...) { }
constexpr static int count = 1 + sizeof...(O);
T head;
IndexTuple<O...> others;
typedef T Head;
typedef IndexTuple<O...> Other;
};
template<typename T>
struct IndexTuple<T> {
EIGEN_DEVICE_FUNC constexpr IndexTuple() : head() { }
EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v) : head(v) { }
constexpr static int count = 1;
T head;
typedef T Head;
};
template<int N, typename... T>
struct IndexTupleExtractor;
template<int N, typename T, typename... O>
struct IndexTupleExtractor<N, T, O...> {
typedef typename IndexTupleExtractor<N-1, O...>::ValType ValType;
EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple<T, O...>& val) {
return IndexTupleExtractor<N-1, O...>::get_val(val.others);
}
EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple<T, O...>& val) {
return IndexTupleExtractor<N-1, O...>::get_val(val.others);
}
template <typename V>
EIGEN_DEVICE_FUNC static void set_val(IndexTuple<T, O...>& val, V& new_val) {
IndexTupleExtractor<N-1, O...>::set_val(val.others, new_val);
}
};
template<typename T, typename... O>
struct IndexTupleExtractor<0, T, O...> {
typedef T ValType;
EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple<T, O...>& val) {
return val.head;
}
EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple<T, O...>& val) {
return val.head;
}
template <typename V>
EIGEN_DEVICE_FUNC static void set_val(IndexTuple<T, O...>& val, V& new_val) {
val.head = new_val;
}
};
template <int N, typename T, typename... O>
EIGEN_DEVICE_FUNC constexpr typename IndexTupleExtractor<N, T, O...>::ValType& array_get(IndexTuple<T, O...>& tuple) {
return IndexTupleExtractor<N, T, O...>::get_val(tuple);
}
template <int N, typename T, typename... O>
EIGEN_DEVICE_FUNC constexpr const typename IndexTupleExtractor<N, T, O...>::ValType& array_get(const IndexTuple<T, O...>& tuple) {
return IndexTupleExtractor<N, T, O...>::get_val(tuple);
}
template <typename T, typename... O>
struct array_size<IndexTuple<T, O...> > {
static const size_t value = IndexTuple<T, O...>::count;
};
template <typename T, typename... O>
struct array_size<const IndexTuple<T, O...> > {
static const size_t value = IndexTuple<T, O...>::count;
};
template <DenseIndex Idx, typename ValueT>
struct tuple_coeff {
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex i, const IndexTuple<T...>& t) {
// return array_get<Idx>(t) * (i == Idx) + tuple_coeff<Idx-1>::get(i, t) * (i != Idx);
return (i == Idx ? array_get<Idx>(t) : tuple_coeff<Idx-1, ValueT>::get(i, t));
}
template <typename... T>
EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT& value) {
if (i == Idx) {
update_value(array_get<Idx>(t), value);
} else {
tuple_coeff<Idx-1, ValueT>::set(i, t, value);
}
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>& t) {
return ((i == Idx) & is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value) ||
tuple_coeff<Idx-1, ValueT>::value_known_statically(i, t);
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple<T...>& t) {
return is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value &&
tuple_coeff<Idx-1, ValueT>::values_up_to_known_statically(t);
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple<T...>& t) {
return is_compile_time_constant<typename IndexTupleExtractor<Idx, T...>::ValType>::value &&
is_compile_time_constant<typename IndexTupleExtractor<Idx-1, T...>::ValType>::value &&
array_get<Idx>(t) > array_get<Idx-1>(t) &&
tuple_coeff<Idx-1, ValueT>::values_up_to_statically_known_to_increase(t);
}
};
template <typename ValueT>
struct tuple_coeff<0, ValueT> {
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr ValueT get(const DenseIndex /*i*/, const IndexTuple<T...>& t) {
// eigen_assert (i == 0); // gcc fails to compile assertions in constexpr
return array_get<0>(t)/* * (i == 0)*/;
}
template <typename... T>
EIGEN_DEVICE_FUNC static void set(const DenseIndex i, IndexTuple<T...>& t, const ValueT value) {
eigen_assert (i == 0);
update_value(array_get<0>(t), value);
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const DenseIndex i, const IndexTuple<T...>&) {
return is_compile_time_constant<typename IndexTupleExtractor<0, T...>::ValType>::value & (i == 0);
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple<T...>&) {
return is_compile_time_constant<typename IndexTupleExtractor<0, T...>::ValType>::value;
}
template <typename... T>
EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple<T...>&) {
return true;
}
};
} // namespace internal
template<typename FirstType, typename... OtherTypes>
struct IndexList : internal::IndexTuple<FirstType, OtherTypes...> {
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex operator[] (const DenseIndex i) const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr DenseIndex get(const DenseIndex i) const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::get(i, *this);
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const DenseIndex value) {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::set(i, *this, value);
}
EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple<FirstType, OtherTypes...>(first, other...) { }
EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
}
EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_known_statically(*this);
}
EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::values_up_to_statically_known_to_increase(*this);
}
};
template<typename FirstType, typename... OtherTypes>
constexpr IndexList<FirstType, OtherTypes...> make_index_list(FirstType val1, OtherTypes... other_vals) {
return IndexList<FirstType, OtherTypes...>(val1, other_vals...);
}
template<typename FirstType, typename... OtherTypes>
struct IndexPairList : internal::IndexTuple<FirstType, OtherTypes...> {
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair<DenseIndex> operator[] (const DenseIndex i) const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, IndexPair<DenseIndex>>::get(i, *this);
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const DenseIndex i, const IndexPair<DenseIndex> value) {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...>>::value-1, IndexPair<DenseIndex> >::set(i, *this, value);
}
EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple<FirstType, OtherTypes...>& other) : internal::IndexTuple<FirstType, OtherTypes...>(other) { }
EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple<FirstType, OtherTypes...>() { }
EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const DenseIndex i) const {
return internal::tuple_coeff<internal::array_size<internal::IndexTuple<FirstType, OtherTypes...> >::value-1, DenseIndex>::value_known_statically(i, *this);
}
};
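// IndexPairList is the IndexPair<DenseIndex> analogue of IndexList, used for
// pairwise arguments such as paddings. A sketch, assuming type2indexpair<F, S>
// from earlier in this header:
//
//   Eigen::IndexPairList<Eigen::type2indexpair<0, 0>,
//                        Eigen::IndexPair<DenseIndex> > paddings;
//   paddings.set(1, Eigen::IndexPair<DenseIndex>(2, 3));  // runtime pair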
namespace internal {
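// The helpers below let IndexList/IndexPairList stand in wherever a
// fixed-size array of indices is expected: array_prod, array_size and
// array_get mirror the overloads provided for Eigen::array.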
template<typename FirstType, typename... OtherTypes> size_t array_prod(const IndexList<FirstType, OtherTypes...>& sizes) {
size_t result = 1;
for (int i = 0; i < array_size<IndexList<FirstType, OtherTypes...> >::value; ++i) {
result *= sizes[i];
}
return result;
}
template<typename FirstType, typename... OtherTypes> struct array_size<IndexList<FirstType, OtherTypes...> > {
static const size_t value = array_size<IndexTuple<FirstType, OtherTypes...> >::value;
};
template<typename FirstType, typename... OtherTypes> struct array_size<const IndexList<FirstType, OtherTypes...> > {
static const size_t value = array_size<IndexTuple<FirstType, OtherTypes...> >::value;
};
template<typename FirstType, typename... OtherTypes> struct array_size<IndexPairList<FirstType, OtherTypes...> > {
static const size_t value = std::tuple_size<std::tuple<FirstType, OtherTypes...> >::value;
};
template<typename FirstType, typename... OtherTypes> struct array_size<const IndexPairList<FirstType, OtherTypes...> > {
static const size_t value = std::tuple_size<std::tuple<FirstType, OtherTypes...> >::value;
};
template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(IndexList<FirstType, OtherTypes...>& a) {
return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
}
template<DenseIndex N, typename FirstType, typename... OtherTypes> EIGEN_DEVICE_FUNC constexpr DenseIndex array_get(const IndexList<FirstType, OtherTypes...>& a) {
return IndexTupleExtractor<N, FirstType, OtherTypes...>::get_val(a);
}
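// Compile-time query traits. The unspecialized versions conservatively
// return false -- for an arbitrary index type nothing is known statically --
// while the IndexList/IndexPairList specializations answer from the list's
// compile-time entries.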
template <typename T>
struct index_known_statically_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_known_statically_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_known_statically_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i);
}
};
template <typename T>
struct all_indices_known_statically_impl {
  EIGEN_DEVICE_FUNC static constexpr bool run() {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct all_indices_known_statically_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return IndexList<FirstType, OtherTypes...>().all_values_known_statically();
}
};
template <typename FirstType, typename... OtherTypes>
struct all_indices_known_statically_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return IndexList<FirstType, OtherTypes...>().all_values_known_statically();
}
};
template <typename T>
struct indices_statically_known_to_increase_impl {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct indices_statically_known_to_increase_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return Eigen::IndexList<FirstType, OtherTypes...>().values_statically_known_to_increase();
}
};
template <typename FirstType, typename... OtherTypes>
struct indices_statically_known_to_increase_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run() {
return Eigen::IndexList<FirstType, OtherTypes...>().values_statically_known_to_increase();
}
};
template <typename T>
struct index_statically_eq_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_eq_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) == value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_eq_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) == value);
}
};
template <typename T>
struct index_statically_ne_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_ne_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) != value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_ne_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) != value);
}
};
template <typename T>
struct index_statically_gt_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_gt_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) > value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_gt_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) > value);
}
};
template <typename T>
struct index_statically_lt_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_lt_impl<IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) < value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_statically_lt_impl<const IndexList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexList<FirstType, OtherTypes...>().get(i) < value);
}
};
template <typename T>
struct index_pair_first_statically_eq_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_first_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_first_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).first == value);
}
};
template <typename T>
struct index_pair_second_statically_eq_impl {
EIGEN_DEVICE_FUNC static constexpr bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_second_statically_eq_impl<IndexPairList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
}
};
template <typename FirstType, typename... OtherTypes>
struct index_pair_second_statically_eq_impl<const IndexPairList<FirstType, OtherTypes...> > {
EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
return IndexPairList<FirstType, OtherTypes...>().value_known_statically(i) &
(IndexPairList<FirstType, OtherTypes...>().operator[](i).second == value);
}
};
} // end namespace internal
} // end namespace Eigen
#else
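// Fallback definitions, presumably selected when the feature test at the top
// of this header finds no C++11 constexpr/variadic-template support: every
// static-index query conservatively reports that nothing is known at compile
// time, so callers take their runtime paths.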
namespace Eigen {
namespace internal {
template <typename T>
struct index_known_statically_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
return false;
}
};
template <typename T>
struct all_indices_known_statically_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
return false;
}
};
template <typename T>
struct indices_statically_known_to_increase_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
return false;
}
};
template <typename T>
struct index_statically_eq_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename T>
struct index_statically_ne_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename T>
struct index_statically_gt_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename T>
struct index_statically_lt_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename T>
struct index_pair_first_statically_eq_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
template <typename T>
struct index_pair_second_statically_eq_impl {
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
return false;
}
};
} // end namespace internal
} // end namespace Eigen
#endif
namespace Eigen {
namespace internal {
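// Uniform entry points used by the Tensor expression code. They dispatch to
// the trait implementations above: real compile-time queries when index
// lists are available, constant false otherwise.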
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(DenseIndex i) {
return index_known_statically_impl<T>::run(i);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool all_indices_known_statically() {
return all_indices_known_statically_impl<T>::run();
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increase() {
return indices_statically_known_to_increase_impl<T>::run();
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(DenseIndex i, DenseIndex value) {
return index_statically_eq_impl<T>::run(i, value);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(DenseIndex i, DenseIndex value) {
return index_statically_ne_impl<T>::run(i, value);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(DenseIndex i, DenseIndex value) {
return index_statically_gt_impl<T>::run(i, value);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(DenseIndex i, DenseIndex value) {
return index_statically_lt_impl<T>::run(i, value);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(DenseIndex i, DenseIndex value) {
return index_pair_first_statically_eq_impl<T>::run(i, value);
}
template <typename T>
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(DenseIndex i, DenseIndex value) {
return index_pair_second_statically_eq_impl<T>::run(i, value);
}
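// A usage sketch (Dims is a hypothetical index-list type):
//
//   if (internal::index_statically_eq<Dims>(0, 1)) {
//     // fast path for a leading dimension statically equal to 1
//   }
//
// With the fallback definitions above, the condition is always false and
// the generic path is taken.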
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H
| 25,810 | 34.552342 | 175 |
h
|