// File: abess-master/python/include/Eigen/src/Core/products/GeneralMatrixVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
#define EIGEN_GENERAL_MATRIX_VECTOR_H
namespace Eigen {
namespace internal {
/* Optimized col-major matrix * vector product:
* This algorithm processes 4 columns at once, which both reduces the
* number of load/stores of the result by a factor of 4 and reduces
* the instruction dependency. Moreover, we know that all bands have the
* same alignment pattern.
*
* Mixing type logic: C += alpha * A * B
* | A | B |alpha| comments
* |real |cplx |cplx | no vectorization
* |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
* |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
* |cplx |real |real | optimal case, vectorization possible via real-cplx mul
*
* Accesses to the matrix coefficients follow the following logic:
*
* - if all columns have the same alignment then
* - if the columns have the same alignment as the result vector, then easy! (-> AllAligned case)
* - otherwise perform unaligned loads only (-> NoneAligned case)
* - otherwise
* - if even columns have the same alignment then
* // odd columns are guaranteed to have the same alignment too
* - if even or odd columns have the same alignment as the result, then
* // for a register size of 2 scalars, this is guaranteed to be the case (e.g., SSE with double)
* - perform half aligned and half unaligned loads (-> EvenAligned case)
* - otherwise perform unaligned loads only (-> NoneAligned case)
* - otherwise, if the register size is 4 scalars (e.g., SSE with float) then
* - one out of 4 consecutive columns is guaranteed to be aligned with the result vector,
* perform simple aligned loads for this column and aligned loads plus re-alignment for the others. (-> FirstAligned case)
* // this re-alignment is done by the palign function implemented for SSE in Eigen/src/Core/arch/SSE/PacketMath.h
* - otherwise,
* // if we get here, this means the register size is greater than 4 (e.g., AVX with floats),
* // we currently fall back to the NoneAligned case
*
* The same reasoning applies to the transposed case.
*
* The last case (PacketSize>4) could probably be improved by generalizing the FirstAligned case, but since we do not support AVX yet...
* One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment
* strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on an 8-byte boundary are not too slow
* compared to unaligned loads on a 4-byte boundary.
*
*/
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
enum {
Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
&& int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
};
typedef typename packet_traits<LhsScalar>::type _LhsPacket;
typedef typename packet_traits<RhsScalar>::type _RhsPacket;
typedef typename packet_traits<ResScalar>::type _ResPacket;
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
ResScalar* res, Index resIncr,
RhsScalar alpha);
};
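// Illustration, not part of Eigen: a minimal scalar sketch of the
// four-columns-at-once accumulation described above, with vectorization,
// conjugation and alignment handling stripped out. It assumes a plain
// column-major array `a` with leading dimension `lda`, and `cols` a
// multiple of 4 (hypothetical names).
template<typename Scalar, typename Index>
void gemv_col_major_sketch(Index rows, Index cols, const Scalar* a, Index lda,
                           const Scalar* x, Scalar alpha, Scalar* y)
{
  for (Index i=0; i<cols; i+=4)
  {
    // expand the four rhs coefficients once (pset1 in the vectorized code)
    const Scalar c0 = alpha*x[i+0], c1 = alpha*x[i+1],
                 c2 = alpha*x[i+2], c3 = alpha*x[i+3];
    // a single sweep over the result accumulates all four columns, cutting
    // the loads/stores of y by a factor of 4
    for (Index j=0; j<rows; ++j)
      y[j] += a[j+(i+0)*lda]*c0 + a[j+(i+1)*lda]*c1
            + a[j+(i+2)*lda]*c2 + a[j+(i+3)*lda]*c3;
  }
}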
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
ResScalar* res, Index resIncr,
RhsScalar alpha)
{
EIGEN_UNUSED_VARIABLE(resIncr);
eigen_internal_assert(resIncr==1);
#ifdef _EIGEN_ACCUMULATE_PACKETS
#error _EIGEN_ACCUMULATE_PACKETS has already been defined
#endif
#define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) \
pstore(&res[j], \
padd(pload<ResPacket>(&res[j]), \
padd( \
padd(pcj.pmul(lhs0.template load<LhsPacket, Alignment0>(j), ptmp0), \
pcj.pmul(lhs1.template load<LhsPacket, Alignment13>(j), ptmp1)), \
padd(pcj.pmul(lhs2.template load<LhsPacket, Alignment2>(j), ptmp2), \
pcj.pmul(lhs3.template load<LhsPacket, Alignment13>(j), ptmp3)) )))
typedef typename LhsMapper::VectorMapper LhsScalars;
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
if(ConjugateRhs)
alpha = numext::conj(alpha);
enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
const Index columnsAtOnce = 4;
const Index peels = 2;
const Index LhsPacketAlignedMask = LhsPacketSize-1;
const Index ResPacketAlignedMask = ResPacketSize-1;
// const Index PeelAlignedMask = ResPacketSize*peels-1;
const Index size = rows;
const Index lhsStride = lhs.stride();
// How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type.
Index alignedStart = internal::first_default_aligned(res,size);
Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(LhsPacketSize/2) ? EvenAligned
: FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices
const Index lhsAlignmentOffset = lhs.firstAligned(size);
// find how many columns we have to skip to be aligned with the result (if possible)
Index skipColumns = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (lhsAlignmentOffset < 0) || (lhsAlignmentOffset == size) || (UIntPtr(res)%sizeof(ResScalar)) )
{
alignedSize = 0;
alignedStart = 0;
alignmentPattern = NoneAligned;
}
else if(LhsPacketSize > 4)
{
// TODO: extend the code to support aligned loads whenever possible when LhsPacketSize > 4.
// Currently, it seems to be better to perform unaligned loads anyway
alignmentPattern = NoneAligned;
}
else if (LhsPacketSize>1)
{
// eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
while (skipColumns<LhsPacketSize &&
alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
++skipColumns;
if (skipColumns==LhsPacketSize)
{
// nothing can be aligned, no need to skip any column
alignmentPattern = NoneAligned;
skipColumns = 0;
}
else
{
skipColumns = (std::min)(skipColumns,cols);
// note that the skipped columns are processed later.
}
/* eigen_internal_assert( (alignmentPattern==NoneAligned)
|| (skipColumns + columnsAtOnce >= cols)
|| LhsPacketSize > size
|| (size_t(firstLhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);*/
}
else if(Vectorizable)
{
alignedStart = 0;
alignedSize = size;
alignmentPattern = AllAligned;
}
const Index offset1 = (FirstAligned && alignmentStep==1)?3:1;
const Index offset3 = (FirstAligned && alignmentStep==1)?1:3;
Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
{
RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs(i, 0)),
ptmp1 = pset1<RhsPacket>(alpha*rhs(i+offset1, 0)),
ptmp2 = pset1<RhsPacket>(alpha*rhs(i+2, 0)),
ptmp3 = pset1<RhsPacket>(alpha*rhs(i+offset3, 0));
// this helps a lot in generating better binary code
const LhsScalars lhs0 = lhs.getVectorMapper(0, i+0), lhs1 = lhs.getVectorMapper(0, i+offset1),
lhs2 = lhs.getVectorMapper(0, i+2), lhs3 = lhs.getVectorMapper(0, i+offset3);
if (Vectorizable)
{
/* explicit vectorization */
// process initial unaligned coeffs
for (Index j=0; j<alignedStart; ++j)
{
res[j] = cj.pmadd(lhs0(j), pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1(j), pfirst(ptmp1), res[j]);
res[j] = cj.pmadd(lhs2(j), pfirst(ptmp2), res[j]);
res[j] = cj.pmadd(lhs3(j), pfirst(ptmp3), res[j]);
}
if (alignedSize>alignedStart)
{
switch(alignmentPattern)
{
case AllAligned:
for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Aligned,Aligned);
break;
case EvenAligned:
for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Aligned);
break;
case FirstAligned:
{
Index j = alignedStart;
if(peels>1)
{
LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
ResPacket T0, T1;
A01 = lhs1.template load<LhsPacket, Aligned>(alignedStart-1);
A02 = lhs2.template load<LhsPacket, Aligned>(alignedStart-2);
A03 = lhs3.template load<LhsPacket, Aligned>(alignedStart-3);
for (; j<peeledSize; j+=peels*ResPacketSize)
{
A11 = lhs1.template load<LhsPacket, Aligned>(j-1+LhsPacketSize); palign<1>(A01,A11);
A12 = lhs2.template load<LhsPacket, Aligned>(j-2+LhsPacketSize); palign<2>(A02,A12);
A13 = lhs3.template load<LhsPacket, Aligned>(j-3+LhsPacketSize); palign<3>(A03,A13);
A00 = lhs0.template load<LhsPacket, Aligned>(j);
A10 = lhs0.template load<LhsPacket, Aligned>(j+LhsPacketSize);
T0 = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
T1 = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
T0 = pcj.pmadd(A01, ptmp1, T0);
A01 = lhs1.template load<LhsPacket, Aligned>(j-1+2*LhsPacketSize); palign<1>(A11,A01);
T0 = pcj.pmadd(A02, ptmp2, T0);
A02 = lhs2.template load<LhsPacket, Aligned>(j-2+2*LhsPacketSize); palign<2>(A12,A02);
T0 = pcj.pmadd(A03, ptmp3, T0);
pstore(&res[j],T0);
A03 = lhs3.template load<LhsPacket, Aligned>(j-3+2*LhsPacketSize); palign<3>(A13,A03);
T1 = pcj.pmadd(A11, ptmp1, T1);
T1 = pcj.pmadd(A12, ptmp2, T1);
T1 = pcj.pmadd(A13, ptmp3, T1);
pstore(&res[j+ResPacketSize],T1);
}
}
for (; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Unaligned);
break;
}
default:
for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Unaligned,Unaligned,Unaligned);
break;
}
}
} // end explicit vectorization
/* process remaining coeffs (or all if there is no explicit vectorization) */
for (Index j=alignedSize; j<size; ++j)
{
res[j] = cj.pmadd(lhs0(j), pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1(j), pfirst(ptmp1), res[j]);
res[j] = cj.pmadd(lhs2(j), pfirst(ptmp2), res[j]);
res[j] = cj.pmadd(lhs3(j), pfirst(ptmp3), res[j]);
}
}
// process remaining first and last columns (at most columnsAtOnce-1)
Index end = cols;
Index start = columnBound;
do
{
for (Index k=start; k<end; ++k)
{
RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs(k, 0));
const LhsScalars lhs0 = lhs.getVectorMapper(0, k);
if (Vectorizable)
{
/* explicit vectorization */
// process the result's first unaligned coeffs
for (Index j=0; j<alignedStart; ++j)
res[j] += cj.pmul(lhs0(j), pfirst(ptmp0));
// process aligned result's coeffs
if (lhs0.template aligned<LhsPacket>(alignedStart))
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(i), ptmp0, pload<ResPacket>(&res[i])));
else
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(lhs0.template load<LhsPacket, Unaligned>(i), ptmp0, pload<ResPacket>(&res[i])));
}
// process remaining scalars (or all if no explicit vectorization)
for (Index i=alignedSize; i<size; ++i)
res[i] += cj.pmul(lhs0(i), pfirst(ptmp0));
}
if (skipColumns)
{
start = 0;
end = skipColumns;
skipColumns = 0;
}
else
break;
} while(Vectorizable);
#undef _EIGEN_ACCUMULATE_PACKETS
}
/* Optimized row-major matrix * vector product:
* This algorithm processes 4 rows at once, which both reduces the
* number of load/stores of the result by a factor of 4 and reduces
* the instruction dependency. Moreover, we know that all bands have the
* same alignment pattern.
*
* Mixing type logic:
* - alpha is always a complex (or converted to a complex)
* - no vectorization
*/
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
enum {
Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
&& int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
};
typedef typename packet_traits<LhsScalar>::type _LhsPacket;
typedef typename packet_traits<RhsScalar>::type _RhsPacket;
typedef typename packet_traits<ResScalar>::type _ResPacket;
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
ResScalar* res, Index resIncr,
ResScalar alpha);
};
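// Illustration, not part of Eigen: a minimal scalar sketch of the
// four-rows-at-once dot-product scheme described above (hypothetical names;
// plain row-major array `a` with leading dimension `lda`, `rows` a multiple
// of 4). The vectorized kernel accumulates in packets and reduces each
// accumulator with predux before the final scaling by alpha.
template<typename Scalar, typename Index>
void gemv_row_major_sketch(Index rows, Index cols, const Scalar* a, Index lda,
                           const Scalar* x, Scalar alpha, Scalar* y)
{
  for (Index i=0; i<rows; i+=4)
  {
    Scalar t0(0), t1(0), t2(0), t3(0);
    for (Index j=0; j<cols; ++j)
    {
      // a single load of x[j] feeds four row dot products
      const Scalar b = x[j];
      t0 += a[(i+0)*lda+j]*b; t1 += a[(i+1)*lda+j]*b;
      t2 += a[(i+2)*lda+j]*b; t3 += a[(i+3)*lda+j]*b;
    }
    y[i+0] += alpha*t0; y[i+1] += alpha*t1;
    y[i+2] += alpha*t2; y[i+3] += alpha*t3;
  }
}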
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
Index rows, Index cols,
const LhsMapper& lhs,
const RhsMapper& rhs,
ResScalar* res, Index resIncr,
ResScalar alpha)
{
eigen_internal_assert(rhs.stride()==1);
#ifdef _EIGEN_ACCUMULATE_PACKETS
#error _EIGEN_ACCUMULATE_PACKETS has already been defined
#endif
#define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) {\
RhsPacket b = rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0); \
ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Alignment0>(j), b, ptmp0); \
ptmp1 = pcj.pmadd(lhs1.template load<LhsPacket, Alignment13>(j), b, ptmp1); \
ptmp2 = pcj.pmadd(lhs2.template load<LhsPacket, Alignment2>(j), b, ptmp2); \
ptmp3 = pcj.pmadd(lhs3.template load<LhsPacket, Alignment13>(j), b, ptmp3); }
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
typedef typename LhsMapper::VectorMapper LhsScalars;
enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
const Index rowsAtOnce = 4;
const Index peels = 2;
const Index RhsPacketAlignedMask = RhsPacketSize-1;
const Index LhsPacketAlignedMask = LhsPacketSize-1;
const Index depth = cols;
const Index lhsStride = lhs.stride();
// How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type
// if that's not the case then vectorization is discarded, see below.
Index alignedStart = rhs.firstAligned(depth);
Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(LhsPacketSize/2) ? EvenAligned
: FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices
const Index lhsAlignmentOffset = lhs.firstAligned(depth);
const Index rhsAlignmentOffset = rhs.firstAligned(rows);
// find how many rows we have to skip to be aligned with rhs (if possible)
Index skipRows = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) ||
(lhsAlignmentOffset < 0) || (lhsAlignmentOffset == depth) ||
(rhsAlignmentOffset < 0) || (rhsAlignmentOffset == rows) )
{
alignedSize = 0;
alignedStart = 0;
alignmentPattern = NoneAligned;
}
else if(LhsPacketSize > 4)
{
// TODO: extend the code to support aligned loads whenever possible when LhsPacketSize > 4.
alignmentPattern = NoneAligned;
}
else if (LhsPacketSize>1)
{
// eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth<LhsPacketSize);
while (skipRows<LhsPacketSize &&
alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
++skipRows;
if (skipRows==LhsPacketSize)
{
// nothing can be aligned, no need to skip any row
alignmentPattern = NoneAligned;
skipRows = 0;
}
else
{
skipRows = (std::min)(skipRows,Index(rows));
// note that the skipped rows are processed later.
}
/* eigen_internal_assert( alignmentPattern==NoneAligned
|| LhsPacketSize==1
|| (skipRows + rowsAtOnce >= rows)
|| LhsPacketSize > depth
|| (size_t(firstLhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);*/
}
else if(Vectorizable)
{
alignedStart = 0;
alignedSize = depth;
alignmentPattern = AllAligned;
}
const Index offset1 = (FirstAligned && alignmentStep==1)?3:1;
const Index offset3 = (FirstAligned && alignmentStep==1)?1:3;
Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
{
// FIXME: what is the purpose of this EIGEN_ALIGN_DEFAULT ??
EIGEN_ALIGN_MAX ResScalar tmp0 = ResScalar(0);
ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
// this helps the compiler generate good binary code
const LhsScalars lhs0 = lhs.getVectorMapper(i+0, 0), lhs1 = lhs.getVectorMapper(i+offset1, 0),
lhs2 = lhs.getVectorMapper(i+2, 0), lhs3 = lhs.getVectorMapper(i+offset3, 0);
if (Vectorizable)
{
/* explicit vectorization */
ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));
// process initial unaligned coeffs
// FIXME this loop gets vectorized by the compiler!
for (Index j=0; j<alignedStart; ++j)
{
RhsScalar b = rhs(j, 0);
tmp0 += cj.pmul(lhs0(j),b); tmp1 += cj.pmul(lhs1(j),b);
tmp2 += cj.pmul(lhs2(j),b); tmp3 += cj.pmul(lhs3(j),b);
}
if (alignedSize>alignedStart)
{
switch(alignmentPattern)
{
case AllAligned:
for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Aligned,Aligned);
break;
case EvenAligned:
for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Aligned);
break;
case FirstAligned:
{
Index j = alignedStart;
if (peels>1)
{
/* Here we process 4 rows with two peeled iterations to hide
* the overhead of unaligned loads. Moreover, unaligned loads are handled
* using special shift/move operations between the two aligned packets
* overlapping the desired unaligned packet. This is *much* more efficient
* than basic unaligned loads.
*/
LhsPacket A01, A02, A03, A11, A12, A13;
A01 = lhs1.template load<LhsPacket, Aligned>(alignedStart-1);
A02 = lhs2.template load<LhsPacket, Aligned>(alignedStart-2);
A03 = lhs3.template load<LhsPacket, Aligned>(alignedStart-3);
for (; j<peeledSize; j+=peels*RhsPacketSize)
{
RhsPacket b = rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0);
A11 = lhs1.template load<LhsPacket, Aligned>(j-1+LhsPacketSize); palign<1>(A01,A11);
A12 = lhs2.template load<LhsPacket, Aligned>(j-2+LhsPacketSize); palign<2>(A02,A12);
A13 = lhs3.template load<LhsPacket, Aligned>(j-3+LhsPacketSize); palign<3>(A03,A13);
ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j), b, ptmp0);
ptmp1 = pcj.pmadd(A01, b, ptmp1);
A01 = lhs1.template load<LhsPacket, Aligned>(j-1+2*LhsPacketSize); palign<1>(A11,A01);
ptmp2 = pcj.pmadd(A02, b, ptmp2);
A02 = lhs2.template load<LhsPacket, Aligned>(j-2+2*LhsPacketSize); palign<2>(A12,A02);
ptmp3 = pcj.pmadd(A03, b, ptmp3);
A03 = lhs3.template load<LhsPacket, Aligned>(j-3+2*LhsPacketSize); palign<3>(A13,A03);
b = rhs.getVectorMapper(j+RhsPacketSize, 0).template load<RhsPacket, Aligned>(0);
ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j+LhsPacketSize), b, ptmp0);
ptmp1 = pcj.pmadd(A11, b, ptmp1);
ptmp2 = pcj.pmadd(A12, b, ptmp2);
ptmp3 = pcj.pmadd(A13, b, ptmp3);
}
}
for (; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Unaligned);
break;
}
default:
for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(Unaligned,Unaligned,Unaligned);
break;
}
tmp0 += predux(ptmp0);
tmp1 += predux(ptmp1);
tmp2 += predux(ptmp2);
tmp3 += predux(ptmp3);
}
} // end explicit vectorization
// process remaining coeffs (or all if no explicit vectorization)
// FIXME this loop gets vectorized by the compiler!
for (Index j=alignedSize; j<depth; ++j)
{
RhsScalar b = rhs(j, 0);
tmp0 += cj.pmul(lhs0(j),b); tmp1 += cj.pmul(lhs1(j),b);
tmp2 += cj.pmul(lhs2(j),b); tmp3 += cj.pmul(lhs3(j),b);
}
res[i*resIncr] += alpha*tmp0;
res[(i+offset1)*resIncr] += alpha*tmp1;
res[(i+2)*resIncr] += alpha*tmp2;
res[(i+offset3)*resIncr] += alpha*tmp3;
}
// process remaining first and last rows (at most rowsAtOnce-1)
Index end = rows;
Index start = rowBound;
do
{
for (Index i=start; i<end; ++i)
{
EIGEN_ALIGN_MAX ResScalar tmp0 = ResScalar(0);
ResPacket ptmp0 = pset1<ResPacket>(tmp0);
const LhsScalars lhs0 = lhs.getVectorMapper(i, 0);
// process the result's first unaligned coeffs
// FIXME this loop gets vectorized by the compiler!
for (Index j=0; j<alignedStart; ++j)
tmp0 += cj.pmul(lhs0(j), rhs(j, 0));
if (alignedSize>alignedStart)
{
// process aligned rhs coeffs
if (lhs0.template aligned<LhsPacket>(alignedStart))
for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j), rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0), ptmp0);
else
for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Unaligned>(j), rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0), ptmp0);
tmp0 += predux(ptmp0);
}
// process remaining scalars
// FIXME this loop gets vectorized by the compiler!
for (Index j=alignedSize; j<depth; ++j)
tmp0 += cj.pmul(lhs0(j), rhs(j, 0));
res[i*resIncr] += alpha*tmp0;
}
if (skipRows)
{
start = 0;
end = skipRows;
skipRows = 0;
}
else
break;
} while(Vectorizable);
#undef _EIGEN_ACCUMULATE_PACKETS
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
// File: abess-master/python/include/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
* General matrix-vector product functionality based on ?GEMV.
********************************************************************************
*/
#ifndef EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
#define EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
namespace Eigen {
namespace internal {
/**********************************************************************
* This file implements general matrix-vector multiplication using the BLAS
* ?gemv routines, via partial specialization of the
* general_matrix_vector_product::run(..) method for float, double,
* std::complex<float> and std::complex<double> types
**********************************************************************/
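// For reference, a sketch of the Fortran-style BLAS entry point that the
// double-precision specialization below ultimately calls. The exact symbol
// name and index type depend on the BLAS build (the common f2c-style
// underscore convention is assumed here):
//
//   extern "C" void dgemv_(const char* trans, const int* m, const int* n,
//                          const double* alpha, const double* a, const int* lda,
//                          const double* x, const int* incx,
//                          const double* beta, double* y, const int* incy);
//
// computing y = alpha*op(A)*x + beta*y with A an m-by-n column-major matrix
// and op() selected by trans ('N', 'T' or 'C').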
// gemv specialization
template<typename Index, typename LhsScalar, int StorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct general_matrix_vector_product_gemv;
#define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,Specialized> { \
static void run( \
Index rows, Index cols, \
const const_blas_data_mapper<Scalar,Index,ColMajor> &lhs, \
const const_blas_data_mapper<Scalar,Index,RowMajor> &rhs, \
Scalar* res, Index resIncr, Scalar alpha) \
{ \
if (ConjugateLhs) { \
general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,BuiltIn>::run( \
rows, cols, lhs, rhs, res, resIncr, alpha); \
} else { \
general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
} \
} \
}; \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ConjugateRhs,Specialized> { \
static void run( \
Index rows, Index cols, \
const const_blas_data_mapper<Scalar,Index,RowMajor> &lhs, \
const const_blas_data_mapper<Scalar,Index,ColMajor> &rhs, \
Scalar* res, Index resIncr, Scalar alpha) \
{ \
general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
} \
};
EIGEN_BLAS_GEMV_SPECIALIZE(double)
EIGEN_BLAS_GEMV_SPECIALIZE(float)
EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_GEMV_SPECIALIZE(scomplex)
#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASPREFIX) \
template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
{ \
typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
\
static void run( \
Index rows, Index cols, \
const EIGTYPE* lhs, Index lhsStride, \
const EIGTYPE* rhs, Index rhsIncr, \
EIGTYPE* res, Index resIncr, EIGTYPE alpha) \
{ \
BlasIndex m=convert_index<BlasIndex>(rows), n=convert_index<BlasIndex>(cols), \
lda=convert_index<BlasIndex>(lhsStride), incx=convert_index<BlasIndex>(rhsIncr), incy=convert_index<BlasIndex>(resIncr); \
const EIGTYPE beta(1); \
const EIGTYPE *x_ptr; \
char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \
if (LhsStorageOrder==RowMajor) { \
m = convert_index<BlasIndex>(cols); \
n = convert_index<BlasIndex>(rows); \
}\
GEMVVector x_tmp; \
if (ConjugateRhs) { \
Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \
x_tmp=map_x.conjugate(); \
x_ptr=x_tmp.data(); \
incx=1; \
} else x_ptr=rhs; \
BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\
};
EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, d)
EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, s)
EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, z)
EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
// File: abess-master/python/include/Eigen/src/Core/products/Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
static EIGEN_UNUSED int m_maxThreads = -1;
if(action==SetAction)
{
eigen_internal_assert(v!=0);
m_maxThreads = *v;
}
else if(action==GetAction)
{
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
}
else
{
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
inline int nbThreads()
{
int ret;
internal::manage_multi_threading(GetAction, &ret);
return ret;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
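// Typical usage of the three functions above, e.g. from an application that
// calls Eigen from several of its own threads (an illustrative sketch):
//
//   Eigen::initParallel();       // once, before using Eigen from threads
//   Eigen::setNbThreads(4);      // cap the number of threads Eigen may use
//   int n = Eigen::nbThreads();  // query the current setting (here: 4)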
namespace internal {
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
Index volatile sync;
int volatile users;
Index lhs_start;
Index lhs_length;
};
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
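// Worked example of the two heuristics above (illustrative numbers): for a
// 1024x1024x1024 product with Traits::nr==4, size/nr caps pb_max_threads at
// 256, and work/kMinTaskSize = 1024^3/50000 ~ 21475 does not lower it, so
// up to min(nbThreads(), 256) threads are used.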
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
// File: abess-master/python/include/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
// pack a selfadjoint block diagonal for use with the gebp_kernel
template<typename Scalar, typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
struct symm_pack_lhs
{
template<int BlockRows> inline
void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
{
// normal copy
for(Index k=0; k<i; k++)
for(Index w=0; w<BlockRows; w++)
blockA[count++] = lhs(i+w,k); // normal
// symmetric copy
Index h = 0;
for(Index k=i; k<i+BlockRows; k++)
{
for(Index w=0; w<h; w++)
blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
blockA[count++] = numext::real(lhs(k,k)); // real (diagonal)
for(Index w=h+1; w<BlockRows; w++)
blockA[count++] = lhs(i+w, k); // normal
++h;
}
// transposed copy
for(Index k=i+BlockRows; k<cols; k++)
for(Index w=0; w<BlockRows; w++)
blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
}
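// Worked illustration, not part of Eigen: with a 4x4 selfadjoint lhs stored
// in its lower triangle, BlockRows==2 and i==1, pack() above emits in order
//   lhs(1,0), lhs(2,0),              // normal copy      (k <  i)
//   real(lhs(1,1)), lhs(2,1),        // diagonal block, row k==1
//   conj(lhs(2,1)), real(lhs(2,2)),  // diagonal block, row k==2
//   conj(lhs(3,1)), conj(lhs(3,2))   // transposed copy  (k >= i+BlockRows)
// i.e. the upper part is reconstructed on the fly from the stored lower
// triangle, so the gebp kernel can consume the panel as if it were dense.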
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
enum { PacketSize = packet_traits<Scalar>::size };
const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
Index count = 0;
//Index peeled_mc3 = (rows/Pack1)*Pack1;
const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
if(Pack1>=3*PacketSize)
for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
pack<3*PacketSize>(blockA, lhs, cols, i, count);
if(Pack1>=2*PacketSize)
for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)
pack<2*PacketSize>(blockA, lhs, cols, i, count);
if(Pack1>=1*PacketSize)
for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
pack<1*PacketSize>(blockA, lhs, cols, i, count);
// do the same with mr==1
for(Index i=peeled_mc1; i<rows; i++)
{
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
blockA[count++] = numext::real(lhs(i, i)); // real (diagonal)
for(Index k=i+1; k<cols; k++)
blockA[count++] = numext::conj(lhs(k, i)); // transposed
}
}
};
template<typename Scalar, typename Index, int nr, int StorageOrder>
struct symm_pack_rhs
{
enum { PacketSize = packet_traits<Scalar>::size };
void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
{
Index end_k = k2 + rows;
Index count = 0;
const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
// first part: normal case
for(Index j2=0; j2<k2; j2+=nr)
{
for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = rhs(k,j2+0);
blockB[count+1] = rhs(k,j2+1);
if (nr>=4)
{
blockB[count+2] = rhs(k,j2+2);
blockB[count+3] = rhs(k,j2+3);
}
if (nr>=8)
{
blockB[count+4] = rhs(k,j2+4);
blockB[count+5] = rhs(k,j2+5);
blockB[count+6] = rhs(k,j2+6);
blockB[count+7] = rhs(k,j2+7);
}
count += nr;
}
}
// second part: diagonal block
Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;
if(nr>=8)
{
for(Index j2=k2; j2<end8; j2+=8)
{
// again we can split vertically in three different parts (transpose, symmetric, normal)
// transpose
for(Index k=k2; k<j2; k++)
{
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
count += 8;
}
// symmetric
Index h = 0;
for(Index k=j2; k<j2+8; k++)
{
// normal
for (Index w=0 ; w<h; ++w)
blockB[count+w] = rhs(k,j2+w);
blockB[count+h] = numext::real(rhs(k,k));
// transpose
for (Index w=h+1 ; w<8; ++w)
blockB[count+w] = numext::conj(rhs(j2+w,k));
count += 8;
++h;
}
// normal
for(Index k=j2+8; k<end_k; k++)
{
blockB[count+0] = rhs(k,j2+0);
blockB[count+1] = rhs(k,j2+1);
blockB[count+2] = rhs(k,j2+2);
blockB[count+3] = rhs(k,j2+3);
blockB[count+4] = rhs(k,j2+4);
blockB[count+5] = rhs(k,j2+5);
blockB[count+6] = rhs(k,j2+6);
blockB[count+7] = rhs(k,j2+7);
count += 8;
}
}
}
if(nr>=4)
{
for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)
{
// again we can split vertically in three different parts (transpose, symmetric, normal)
// transpose
for(Index k=k2; k<j2; k++)
{
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
count += 4;
}
// symmetric
Index h = 0;
for(Index k=j2; k<j2+4; k++)
{
// normal
for (Index w=0 ; w<h; ++w)
blockB[count+w] = rhs(k,j2+w);
blockB[count+h] = numext::real(rhs(k,k));
// transpose
for (Index w=h+1 ; w<4; ++w)
blockB[count+w] = numext::conj(rhs(j2+w,k));
count += 4;
++h;
}
// normal
for(Index k=j2+4; k<end_k; k++)
{
blockB[count+0] = rhs(k,j2+0);
blockB[count+1] = rhs(k,j2+1);
blockB[count+2] = rhs(k,j2+2);
blockB[count+3] = rhs(k,j2+3);
count += 4;
}
}
}
// third part: transposed
if(nr>=8)
{
for(Index j2=k2+rows; j2<packet_cols8; j2+=8)
{
for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
count += 8;
}
}
}
if(nr>=4)
{
for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)
{
for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
count += 4;
}
}
}
// copy the remaining columns one at a time (=> the same with nr==1)
for(Index j2=packet_cols4; j2<cols; ++j2)
{
// transpose
Index half = (std::min)(end_k,j2);
for(Index k=k2; k<half; k++)
{
blockB[count] = numext::conj(rhs(j2,k));
count += 1;
}
if(half==j2 && half<k2+rows)
{
blockB[count] = numext::real(rhs(j2,j2));
count += 1;
}
else
half--;
// normal
for(Index k=half+1; k<k2+rows; k++)
{
blockB[count] = rhs(k,j2);
count += 1;
}
}
}
};
/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
* the general matrix matrix product.
*/
template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
int ResStorageOrder>
struct product_selfadjoint_matrix;
template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
{
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
ColMajor>
::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
}
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* _res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
Index size = rows;
typedef gebp_traits<Scalar,Scalar> Traits;
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, (LhsStorageOrder == RowMajor) ? ColMajor : RowMajor> LhsTransposeMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
LhsTransposeMapper lhs_transpose(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
// kc must be smaller than mc
kc = (std::min)(kc,mc);
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
for(Index k2=0; k2<size; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,size)-k2;
// we have selected one row panel of rhs and one column panel of lhs
// pack rhs's panel into a sequential chunk of memory
// and expand each coeff to a constant packet for further reuse
pack_rhs(blockB, rhs.getSubMapper(k2,0), actual_kc, cols);
// the selected lhs panel has to be split into three different parts:
// 1 - the transposed panel above the diagonal block => transposed packed copy
// 2 - the diagonal block => special packed copy
// 3 - the panel below the diagonal block => generic packed copy
for(Index i2=0; i2<k2; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,k2)-i2;
// transposed packed copy
pack_lhs_transposed(blockA, lhs_transpose.getSubMapper(i2, k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
}
// the block diagonal
{
const Index actual_mc = (std::min)(k2+kc,size)-k2;
// symmetric packed copy
pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(k2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
}
for(Index i2=k2+kc; i2<size; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,size)-i2;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
}
}
}
// matrix * selfadjoint product
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
{
static EIGEN_DONT_INLINE void run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs>
EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
Index rows, Index cols,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* _res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
Index size = cols;
typedef gebp_traits<Scalar,Scalar> Traits;
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
ResMapper res(_res,resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=0; k2<size; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,size)-k2;
pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);
// => GEPP
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);
}
}
}
} // end namespace internal
/***************************************************************************
* Wrapper to product_selfadjoint_matrix
***************************************************************************/
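// A high-level expression such as
//
//   dst.noalias() += alpha * A.selfadjointView<Eigen::Lower>() * B;
//
// (usage sketch with hypothetical A, B, dst) is what eventually lands here:
// selfadjoint_product_impl extracts the scalar factors and storage orders
// and forwards to the product_selfadjoint_matrix kernels defined above.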
namespace internal {
template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
enum {
LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,
LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,
RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,
RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
};
template<typename Dest>
static void run(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,1> BlockingType;
BlockingType blocking(lhs.rows(), rhs.cols(), lhs.cols(), 1, false);
internal::product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(LhsIsUpper,internal::traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor>
::run(
lhs.rows(), rhs.cols(), // sizes
&lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
&rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
&dst.coeffRef(0,0), dst.outerStride(), // result info
actualAlpha, blocking // alpha
);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H
// File: abess-master/python/include/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
* Selfadjoint matrix * matrix product functionality based on ?SYMM/?HEMM.
********************************************************************************
*/
#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H
namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */
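// For reference, the routine targeted below computes C = alpha*A*B + beta*C
// (side=='L') or C = alpha*B*A + beta*C (side=='R'), with A selfadjoint and
// only the triangle selected by uplo referenced. A sketch of the common
// f2c-style double-precision symbol (the macros generate the s/d/c/z
// variants):
//
//   dsymm_(&side, &uplo, &m, &n, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);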
#define EIGEN_BLAS_SYMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
{\
\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
char side='L', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
EIGTYPE beta(1); \
MatrixX##EIGPREFIX b_tmp; \
\
/* Set transpose options */ \
/* Set m, n, k */ \
m = convert_index<BlasIndex>(rows); \
n = convert_index<BlasIndex>(cols); \
\
/* Set lda, ldb, ldc */ \
lda = convert_index<BlasIndex>(lhsStride); \
ldb = convert_index<BlasIndex>(rhsStride); \
ldc = convert_index<BlasIndex>(resStride); \
\
/* Set a, b, c */ \
if (LhsStorageOrder==RowMajor) uplo='U'; \
a = _lhs; \
\
if (RhsStorageOrder==RowMajor) { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
b_tmp = rhs.adjoint(); \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} else b = _rhs; \
\
BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
#define EIGEN_BLAS_HEMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
{\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
char side='L', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
EIGTYPE beta(1); \
MatrixX##EIGPREFIX b_tmp; \
Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> a_tmp; \
\
/* Set transpose options */ \
/* Set m, n, k */ \
m = convert_index<BlasIndex>(rows); \
n = convert_index<BlasIndex>(cols); \
\
/* Set lda, ldb, ldc */ \
lda = convert_index<BlasIndex>(lhsStride); \
ldb = convert_index<BlasIndex>(rhsStride); \
ldc = convert_index<BlasIndex>(resStride); \
\
/* Set a, b, c */ \
if (((LhsStorageOrder==ColMajor) && ConjugateLhs) || ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \
Map<const Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder>, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \
a_tmp = lhs.conjugate(); \
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else a = _lhs; \
if (LhsStorageOrder==RowMajor) uplo='U'; \
\
if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \
b = _rhs; } \
else { \
if (RhsStorageOrder==ColMajor && ConjugateRhs) { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \
b_tmp = rhs.conjugate(); \
} else \
if (ConjugateRhs) { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
b_tmp = rhs.adjoint(); \
} else { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
b_tmp = rhs.transpose(); \
} \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} \
\
BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
EIGEN_BLAS_SYMM_L(double, double, d, d)
EIGEN_BLAS_SYMM_L(float, float, f, s)
EIGEN_BLAS_HEMM_L(dcomplex, double, cd, z)
EIGEN_BLAS_HEMM_L(scomplex, float, cf, c)
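// Editorial sketch (not part of Eigen): for EIGTYPE==double the SYMM_L macro above
// boils down to a call of the standard Fortran BLAS routine
//   dsymm_(&side, &uplo, &m, &n, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
// with side='L' (selfadjoint operand on the left) and beta=1, so the product is
// accumulated into the existing contents of res, matching C += alpha*A*B.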
/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */
#define EIGEN_BLAS_SYMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
{\
\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
char side='R', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
EIGTYPE beta(1); \
MatrixX##EIGPREFIX b_tmp; \
\
   /* Set m, n */ \
m = convert_index<BlasIndex>(rows); \
n = convert_index<BlasIndex>(cols); \
\
/* Set lda, ldb, ldc */ \
lda = convert_index<BlasIndex>(rhsStride); \
ldb = convert_index<BlasIndex>(lhsStride); \
ldc = convert_index<BlasIndex>(resStride); \
\
/* Set a, b, c */ \
if (RhsStorageOrder==RowMajor) uplo='U'; \
a = _rhs; \
\
if (LhsStorageOrder==RowMajor) { \
       Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
b_tmp = lhs.adjoint(); \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} else b = _lhs; \
\
BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
\
} \
};
#define EIGEN_BLAS_HEMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
{\
static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \
{ \
char side='R', uplo='L'; \
BlasIndex m, n, lda, ldb, ldc; \
const EIGTYPE *a, *b; \
EIGTYPE beta(1); \
MatrixX##EIGPREFIX b_tmp; \
Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> a_tmp; \
\
   /* Set m, n */ \
m = convert_index<BlasIndex>(rows); \
n = convert_index<BlasIndex>(cols); \
\
/* Set lda, ldb, ldc */ \
lda = convert_index<BlasIndex>(rhsStride); \
ldb = convert_index<BlasIndex>(lhsStride); \
ldc = convert_index<BlasIndex>(resStride); \
\
/* Set a, b, c */ \
if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \
Map<const Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder>, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \
a_tmp = rhs.conjugate(); \
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else a = _rhs; \
if (RhsStorageOrder==RowMajor) uplo='U'; \
\
if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \
b = _lhs; } \
else { \
if (LhsStorageOrder==ColMajor && ConjugateLhs) { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \
b_tmp = lhs.conjugate(); \
} else \
if (ConjugateLhs) { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
b_tmp = lhs.adjoint(); \
} else { \
Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
b_tmp = lhs.transpose(); \
} \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
} \
\
BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \
} \
};
EIGEN_BLAS_SYMM_R(double, double, d, d)
EIGEN_BLAS_SYMM_R(float, float, f, s)
EIGEN_BLAS_HEMM_R(dcomplex, double, cd, z)
EIGEN_BLAS_HEMM_R(scomplex, float, cf, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H
abess-master/python/include/Eigen/src/Core/products/SelfadjointMatrixVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H
namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both reduces
 * the number of loads/stores of the result by a factor of 2 and reduces
 * the instruction dependency.
*/
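// A minimal scalar sketch of the per-column-pair update performed below
// (editorial, ignoring vectorization, conjugation and alignment handling):
//   for (Index i = start; i < end; ++i) {
//     res[i] += alpha * (A(i,j)*rhs[j] + A(i,j+1)*rhs[j+1]);  // "direct" half
//     t2 += A(i,j)   * rhs[i];                                // "transposed" half,
//     t3 += A(i,j+1) * rhs[i];                                // folded into res[j], res[j+1]
//   }
// so only one triangle of the selfadjoint matrix is ever read.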
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
static EIGEN_DONT_INLINE void run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
Scalar* res,
Scalar alpha);
};
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
Index size,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs,
Scalar* res,
Scalar alpha)
{
typedef typename packet_traits<Scalar>::type Packet;
typedef typename NumTraits<Scalar>::Real RealScalar;
const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum {
IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
IsLower = UpLo == Lower ? 1 : 0,
FirstTriangular = IsRowMajor == IsLower
};
conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0;
conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
if (FirstTriangular)
bound = size - bound;
for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2)
{
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
Scalar t0 = cjAlpha * rhs[j];
Packet ptmp0 = pset1<Packet>(t0);
Scalar t1 = cjAlpha * rhs[j+1];
Packet ptmp1 = pset1<Packet>(t1);
Scalar t2(0);
Packet ptmp2 = pset1<Packet>(t2);
Scalar t3(0);
Packet ptmp3 = pset1<Packet>(t3);
Index starti = FirstTriangular ? 0 : j+2;
Index endi = FirstTriangular ? j : size;
Index alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti);
Index alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
res[j] += cjd.pmul(numext::real(A0[j]), t0);
res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
if(FirstTriangular)
{
res[j] += cj0.pmul(A1[j], t1);
t3 += cj1.pmul(A1[j], rhs[j]);
}
else
{
res[j+1] += cj0.pmul(A0[j+1],t0);
t2 += cj1.pmul(A0[j+1], rhs[j+1]);
}
for (Index i=starti; i<alignedStart; ++i)
{
res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
t2 += cj1.pmul(A0[i], rhs[i]);
t3 += cj1.pmul(A1[i], rhs[i]);
}
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up).
// gcc 4.2 does this optimization automatically.
const Scalar* EIGEN_RESTRICT a0It = A0 + alignedStart;
const Scalar* EIGEN_RESTRICT a1It = A1 + alignedStart;
const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
for (Index i=alignedStart; i<alignedEnd; i+=PacketSize)
{
Packet A0i = ploadu<Packet>(a0It); a0It += PacketSize;
Packet A1i = ploadu<Packet>(a1It); a1It += PacketSize;
Packet Bi = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
Packet Xi = pload <Packet>(resIt);
Xi = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
pstore(resIt,Xi); resIt += PacketSize;
}
for (Index i=alignedEnd; i<endi; i++)
{
res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
t2 += cj1.pmul(A0[i], rhs[i]);
t3 += cj1.pmul(A1[i], rhs[i]);
}
res[j] += alpha * (t2 + predux(ptmp2));
res[j+1] += alpha * (t3 + predux(ptmp3));
}
for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
Scalar t1 = cjAlpha * rhs[j];
Scalar t2(0);
res[j] += cjd.pmul(numext::real(A0[j]), t1);
for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
{
res[i] += cj0.pmul(A0[i], t1);
t2 += cj1.pmul(A0[i], rhs[i]);
}
res[j] += alpha * t2;
}
}
} // end namespace internal
/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/
namespace internal {
template<typename Lhs, int LhsMode, typename Rhs>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum { LhsUpLo = LhsMode&(Upper|Lower) };
template<typename Dest>
static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
{
typedef typename Dest::Scalar ResScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
enum {
EvalToDest = (Dest::InnerStrideAtCompileTime==1),
UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1)
};
internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
internal::gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!UseRhs> static_rhs;
ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
EvalToDest ? dest.data() : static_dest.data());
ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());
if(!EvalToDest)
{
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
Index size = dest.size();
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#endif
MappedDest(actualDestPtr, dest.size()) = dest;
}
if(!UseRhs)
{
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
Index size = rhs.size();
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#endif
Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
}
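    // Editorial note: the two copies above exist because the kernel below assumes a
    // unit inner stride for both the rhs and the destination; when either expression
    // has a non-unit inner stride, it is first evaluated into an aligned temporary.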
internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
(
lhs.rows(), // size
&lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
actualRhsPtr, // rhs info
actualDestPtr, // result info
actualAlpha // scale factor
);
if(!EvalToDest)
dest = MappedDest(actualDestPtr, dest.size());
}
};
template<typename Lhs, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,0,true,Rhs,RhsMode,false>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
enum { RhsUpLo = RhsMode&(Upper|Lower) };
template<typename Dest>
static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
{
// let's simply transpose the product
Transpose<Dest> destT(dest);
selfadjoint_product_impl<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
Transpose<const Lhs>, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H
abess-master/python/include/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
* Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV.
********************************************************************************
*/
#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H
namespace Eigen {
namespace internal {
/**********************************************************************
* This file implements selfadjoint matrix-vector multiplication using BLAS
**********************************************************************/
// symv/hemv specialization
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
struct selfadjoint_matrix_vector_product_symv :
selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn> {};
#define EIGEN_BLAS_SYMV_SPECIALIZE(Scalar) \
template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \
static void run( \
Index size, const Scalar* lhs, Index lhsStride, \
const Scalar* _rhs, Scalar* res, Scalar alpha) { \
enum {\
IsColMajor = StorageOrder==ColMajor \
}; \
if (IsColMajor == ConjugateLhs) {\
selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn>::run( \
size, lhs, lhsStride, _rhs, res, alpha); \
} else {\
selfadjoint_matrix_vector_product_symv<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs>::run( \
size, lhs, lhsStride, _rhs, res, alpha); \
}\
} \
  };
EIGEN_BLAS_SYMV_SPECIALIZE(double)
EIGEN_BLAS_SYMV_SPECIALIZE(float)
EIGEN_BLAS_SYMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_SYMV_SPECIALIZE(scomplex)
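// Editorial note: the (IsColMajor == ConjugateLhs) test above keeps the built-in
// kernel for the combinations that do not map directly onto ?SYMV/?HEMV. Because
// the matrix is selfadjoint, a row-major conjugated operand is equivalent to a
// plain column-major one, which is exactly the layout the Fortran routines expect.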
#define EIGEN_BLAS_SYMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \
template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
struct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs> \
{ \
typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\
\
static void run( \
Index size, const EIGTYPE* lhs, Index lhsStride, \
const EIGTYPE* _rhs, EIGTYPE* res, EIGTYPE alpha) \
{ \
enum {\
IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \
IsLower = UpLo == Lower ? 1 : 0 \
}; \
BlasIndex n=convert_index<BlasIndex>(size), lda=convert_index<BlasIndex>(lhsStride), incx=1, incy=1; \
EIGTYPE beta(1); \
const EIGTYPE *x_ptr; \
char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \
SYMVVector x_tmp; \
if (ConjugateRhs) { \
Map<const SYMVVector, 0 > map_x(_rhs,size,1); \
x_tmp=map_x.conjugate(); \
x_ptr=x_tmp.data(); \
} else x_ptr=_rhs; \
BLASFUNC(&uplo, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\
};
EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, double, zhemv_)
EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, float, chemv_)
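// Editorial sketch: for EIGTYPE==double the specialization above reduces to the
// classic Fortran call
//   dsymv_(&uplo, &n, &alpha, a, &lda, x, &incx, &beta, y, &incy);
// with beta=1, matching the accumulating semantics res += alpha * A * x.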
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H
abess-master/python/include/Eigen/src/Core/products/SelfadjointProduct.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINT_PRODUCT_H
#define EIGEN_SELFADJOINT_PRODUCT_H
/**********************************************************************
 * This file implements a selfadjoint product: C += A A^T, updating only
 * one half of the selfadjoint matrix C.
 * It corresponds to the level-3 SYRK and level-2 SYR BLAS routines.
**********************************************************************/
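// Illustrative usage reaching this file (editorial sketch, using the public API
// defined at the bottom of this header):
//   #include <Eigen/Dense>
//   Eigen::MatrixXd C = Eigen::MatrixXd::Zero(4,4);
//   Eigen::VectorXd u = Eigen::VectorXd::Random(4);
//   C.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);  // C += 2*u*u^T, lower half only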
namespace Eigen {
template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
{
static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
{
internal::conj_if<ConjRhs> cj;
typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjLhsType;
for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))
+= (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
}
}
};
template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
{
static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
{
selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);
}
};
template<typename MatrixType, typename OtherType, int UpLo, bool OtherIsVector = OtherType::IsVectorAtCompileTime>
struct selfadjoint_product_selector;
template<typename MatrixType, typename OtherType, int UpLo>
struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
{
static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef internal::blas_traits<OtherType> OtherBlasTraits;
typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());
Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
enum {
StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1
};
internal::gemv_static_vector_if<Scalar,OtherType::SizeAtCompileTime,OtherType::MaxSizeAtCompileTime,!UseOtherDirectly> static_other;
ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(),
(UseOtherDirectly ? const_cast<Scalar*>(actualOther.data()) : static_other.data()));
if(!UseOtherDirectly)
Map<typename _ActualOtherType::PlainObject>(actualOtherPtr, actualOther.size()) = actualOther;
selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
(!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha);
}
};
template<typename MatrixType, typename OtherType, int UpLo>
struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
{
static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef internal::blas_traits<OtherType> OtherBlasTraits;
typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());
Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
enum {
IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0,
OtherIsRowMajor = _ActualOtherType::Flags&RowMajorBit ? 1 : 0
};
Index size = mat.cols();
Index depth = actualOther.cols();
typedef internal::gemm_blocking_space<IsRowMajor ? RowMajor : ColMajor,Scalar,Scalar,
MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime, _ActualOtherType::MaxColsAtCompileTime> BlockingType;
BlockingType blocking(size, size, depth, 1, false);
internal::general_matrix_matrix_triangular_product<Index,
Scalar, OtherIsRowMajor ? RowMajor : ColMajor, OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
Scalar, OtherIsRowMajor ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex,
IsRowMajor ? RowMajor : ColMajor, UpLo>
::run(size, depth,
&actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(),
mat.data(), mat.outerStride(), actualAlpha, blocking);
}
};
// high level API
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
{
selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
return *this;
}
} // end namespace Eigen
#endif // EIGEN_SELFADJOINT_PRODUCT_H
abess-master/python/include/Eigen/src/Core/products/SelfadjointRank2Update.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINTRANK2UPDATE_H
#define EIGEN_SELFADJOINTRANK2UPDATE_H
namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu'
 * It corresponds to the level-2 SYR2 BLAS routine.
*/
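// Illustrative usage of the rank-2 update implemented below (editorial sketch,
// using the public SelfAdjointView API defined at the end of this file):
//   A.selfadjointView<Eigen::Lower>().rankUpdate(u, v, alpha);
//   // A += alpha*u*v^H + conj(alpha)*v*u^H, touching only the lower triangle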
template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
struct selfadjoint_rank2_update_selector;
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{
static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
(numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)
+ (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);
}
}
};
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
{
static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
(numext::conj(alpha) * numext::conj(u.coeff(i))) * v.head(i+1)
+ (alpha * numext::conj(v.coeff(i))) * u.head(i+1);
}
};
template<bool Cond, typename T> struct conj_expr_if
: conditional<!Cond, const T&,
CwiseUnaryOp<scalar_conjugate_op<typename traits<T>::Scalar>,T> > {};
} // end namespace internal
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU, typename DerivedV>
SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
{
typedef internal::blas_traits<DerivedU> UBlasTraits;
typedef typename UBlasTraits::DirectLinearAccessType ActualUType;
typedef typename internal::remove_all<ActualUType>::type _ActualUType;
typename internal::add_const_on_value_type<ActualUType>::type actualU = UBlasTraits::extract(u.derived());
typedef internal::blas_traits<DerivedV> VBlasTraits;
typedef typename VBlasTraits::DirectLinearAccessType ActualVType;
typedef typename internal::remove_all<ActualVType>::type _ActualVType;
typename internal::add_const_on_value_type<ActualVType>::type actualV = VBlasTraits::extract(v.derived());
// If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and
// vice versa, and take the complex conjugate of all coefficients and vector entries.
enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
* numext::conj(VBlasTraits::extractScalarFactor(v.derived()));
if (IsRowMajor)
actualAlpha = numext::conj(actualAlpha);
typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type UType;
typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::type>::type VType;
internal::selfadjoint_rank2_update_selector<Scalar, Index, UType, VType,
(IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>
::run(_expression().const_cast_derived().data(),_expression().outerStride(),UType(actualU),VType(actualV),actualAlpha);
return *this;
}
} // end namespace Eigen
#endif // EIGEN_SELFADJOINTRANK2UPDATE_H
abess-master/python/include/Eigen/src/Core/products/TriangularMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
// struct gemm_pack_lhs_triangular
// {
// Matrix<Scalar,mr,mr,
// void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)
// {
// conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
// const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
// int count = 0;
// const int peeled_mc = (rows/mr)*mr;
// for(int i=0; i<peeled_mc; i+=mr)
// {
// for(int k=0; k<depth; k++)
// for(int w=0; w<mr; w++)
// blockA[count++] = cj(lhs(i+w, k));
// }
// for(int i=peeled_mc; i<rows; i++)
// {
// for(int k=0; k<depth; k++)
// blockA[count++] = cj(lhs(i, k));
// }
// }
// };
/* Optimized triangular matrix * matrix (?TRMM) product built on top of
 * the general matrix-matrix product.
*/
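// Illustrative usage reaching this kernel (editorial sketch, public API assumed):
//   #include <Eigen/Dense>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5), B = Eigen::MatrixXd::Random(5,3);
//   Eigen::MatrixXd C = A.triangularView<Eigen::Lower>() * B;  // only A's lower triangle is read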
template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder, int Version = Specialized>
struct product_triangular_matrix_matrix;
template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,RowMajor,Version>
{
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
product_triangular_matrix_matrix<Scalar, Index,
(Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
(!LhsIsTriangular),
RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
ConjugateRhs,
LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
ConjugateLhs,
ColMajor>
::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
}
};
// implements col-major += alpha * op(triangular) * op(general)
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor,Version>
{
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
SmallPanelWidth = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
IsLower = (Mode&Lower) == Lower,
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
};
static EIGEN_DONT_INLINE void run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* _res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
// strip zeros
Index diagSize = (std::min)(_rows,_depth);
Index rows = IsLower ? _rows : diagSize;
Index depth = IsLower ? diagSize : _depth;
Index cols = _cols;
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
  // The small panel size must not be larger than the blocking sizes.
  // Usually this is never the case because SmallPanelWidth^2 is very small
  // compared to the L2 cache size, but let's be safe:
Index panelWidth = (std::min)(Index(SmallPanelWidth),(std::min)(kc,mc));
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
triangularBuffer.setZero();
if((Mode&ZeroDiag)==ZeroDiag)
triangularBuffer.diagonal().setZero();
else
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
for(Index k2=IsLower ? depth : 0;
IsLower ? k2>0 : k2<depth;
IsLower ? k2-=kc : k2+=kc)
{
Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
Index actual_k2 = IsLower ? k2-actual_kc : k2;
// align blocks with the end of the triangular part for trapezoidal lhs
if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
{
actual_kc = rows-k2;
k2 = k2+actual_kc-kc;
}
pack_rhs(blockB, rhs.getSubMapper(actual_k2,0), actual_kc, cols);
// the selected lhs's panel has to be split in three different parts:
// 1 - the part which is zero => skip it
// 2 - the diagonal block => special kernel
// 3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
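    // (Editorial note: in the lower-triangular case the zero part lies above the
    // diagonal of the current panel, the small triangular blocks straddle the
    // diagonal, and the dense GEPP part lies below it; the upper case mirrors this.)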
// the block diagonal, if any:
if(IsLower || actual_k2<rows)
{
// for each small vertical panels of lhs
for (Index k1=0; k1<actual_kc; k1+=panelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-k1, panelWidth);
Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
Index startBlock = actual_k2+k1;
Index blockBOffset = k1;
// => GEBP with the micro triangular block
// The trick is to pack this micro block while filling the opposite triangular part with zeros.
// To this end we do an extra triangular copy to a small temporary buffer
for (Index k=0;k<actualPanelWidth;++k)
{
if (SetDiag)
triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
}
pack_lhs(blockA, LhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()), actualPanelWidth, actualPanelWidth);
gebp_kernel(res.getSubMapper(startBlock, 0), blockA, blockB,
actualPanelWidth, actualPanelWidth, cols, alpha,
actualPanelWidth, actual_kc, 0, blockBOffset);
// GEBP with remaining micro panel
if (lengthTarget>0)
{
Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
pack_lhs(blockA, lhs.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);
gebp_kernel(res.getSubMapper(startTarget, 0), blockA, blockB,
lengthTarget, actualPanelWidth, cols, alpha,
actualPanelWidth, actual_kc, 0, blockBOffset);
}
}
}
// the part below (lower case) or above (upper case) the diagonal => GEPP
{
Index start = IsLower ? k2 : 0;
Index end = IsLower ? rows : (std::min)(actual_k2,rows);
for(Index i2=start; i2<end; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,end)-i2;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);
gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,
actual_kc, cols, alpha, -1, -1, 0, 0);
}
}
}
}
// implements col-major += alpha * op(general) * op(triangular)
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor,Version>
{
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
IsLower = (Mode&Lower) == Lower,
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
};
static EIGEN_DONT_INLINE void run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
Index _rows, Index _cols, Index _depth,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* _res, Index resStride,
const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
const Index PacketBytes = packet_traits<Scalar>::size*sizeof(Scalar);
// strip zeros
Index diagSize = (std::min)(_cols,_depth);
Index rows = _rows;
Index depth = IsLower ? _depth : diagSize;
Index cols = IsLower ? diagSize : _cols;
typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols+EIGEN_MAX_ALIGN_BYTES/sizeof(Scalar);
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
triangularBuffer.setZero();
if((Mode&ZeroDiag)==ZeroDiag)
triangularBuffer.diagonal().setZero();
else
triangularBuffer.diagonal().setOnes();
gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
for(Index k2=IsLower ? 0 : depth;
IsLower ? k2<depth : k2>0;
IsLower ? k2+=kc : k2-=kc)
{
Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
Index actual_k2 = IsLower ? k2 : k2-actual_kc;
// align blocks with the end of the triangular part for trapezoidal rhs
if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
{
actual_kc = cols-k2;
k2 = actual_k2 + actual_kc - kc;
}
// remaining size
Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
// size of the triangular part
Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
Scalar* geb = blockB+ts*ts;
geb = geb + internal::first_aligned<PacketBytes>(geb,PacketBytes/sizeof(Scalar));
pack_rhs(geb, rhs.getSubMapper(actual_k2,IsLower ? 0 : k2), actual_kc, rs);
// pack the triangular part of the rhs padding the unrolled blocks with zeros
if(ts>0)
{
for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
Index actual_j2 = actual_k2 + j2;
Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
// general part
pack_rhs_panel(blockB+j2*actual_kc,
rhs.getSubMapper(actual_k2+panelOffset, actual_j2),
panelLength, actualPanelWidth,
actual_kc, panelOffset);
// append the triangular part via a temporary buffer
for (Index j=0;j<actualPanelWidth;++j)
{
if (SetDiag)
triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
}
pack_rhs_panel(blockB+j2*actual_kc,
RhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()),
actualPanelWidth, actualPanelWidth,
actual_kc, j2);
}
}
for (Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(mc,rows-i2);
pack_lhs(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);
// triangular kernel
if(ts>0)
{
for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
Index blockOffset = IsLower ? j2 : 0;
gebp_kernel(res.getSubMapper(i2, actual_k2 + j2),
blockA, blockB+j2*actual_kc,
actual_mc, panelLength, actualPanelWidth,
alpha,
actual_kc, actual_kc, // strides
blockOffset, blockOffset);// offsets
}
}
gebp_kernel(res.getSubMapper(i2, IsLower ? 0 : k2),
blockA, geb, actual_mc, actual_kc, rs,
alpha,
-1, -1, 0, 0);
}
}
}
/***************************************************************************
* Wrapper to product_triangular_matrix_matrix
***************************************************************************/
} // end namespace internal
namespace internal {
template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;
enum { IsLower = (Mode&Lower) == Lower };
Index stripedRows = ((!LhsIsTriangular) || (IsLower)) ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols());
Index stripedCols = ((LhsIsTriangular) || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows());
Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows()))
: ((IsLower) ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols()));
BlockingType blocking(stripedRows, stripedCols, stripedDepth, 1, false);
internal::product_triangular_matrix_matrix<Scalar, Index,
Mode, LhsIsTriangular,
(internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
(internal::traits<ActualRhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
(internal::traits<Dest >::Flags&RowMajorBit) ? RowMajor : ColMajor>
::run(
stripedRows, stripedCols, stripedDepth, // sizes
&lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
&rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
&dst.coeffRef(0,0), dst.outerStride(), // result info
actualAlpha, blocking
);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H
abess-master/python/include/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
* Triangular matrix * matrix product functionality based on ?TRMM.
********************************************************************************
*/
#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H
#define EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H
namespace Eigen {
namespace internal {
template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder>
struct product_triangular_matrix_matrix_trmm :
product_triangular_matrix_matrix<Scalar,Index,Mode,
LhsIsTriangular,LhsStorageOrder,ConjugateLhs,
RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {};
// try to go to BLAS specialization
#define EIGEN_BLAS_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \
template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
_rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
} \
};
EIGEN_BLAS_TRMM_SPECIALIZE(double, true)
EIGEN_BLAS_TRMM_SPECIALIZE(double, false)
EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, true)
EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, false)
EIGEN_BLAS_TRMM_SPECIALIZE(float, true)
EIGEN_BLAS_TRMM_SPECIALIZE(float, false)
EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true)
EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false)
// implements col-major += alpha * op(triangular) * op(general)
#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper, \
conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \
}; \
\
static void run( \
Index _rows, Index _cols, Index _depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
{ \
Index diagSize = (std::min)(_rows,_depth); \
Index rows = IsLower ? _rows : diagSize; \
Index depth = IsLower ? diagSize : _depth; \
Index cols = _cols; \
\
typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
\
    /* Non-square case - does not fit BLAS ?TRMM; fall back to the default triangular product or call BLAS ?GEMM */ \
if (rows != depth) { \
\
/* FIXME handle mkl_domain_get_max_threads */ \
/*int nthr = mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS);*/ int nthr = 1;\
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from BLAS */ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
_rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
/*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
      /* Makes sense to call GEMM */ \
Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \
MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \
gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \
general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_L: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \
} \
return; \
} \
char side = 'L', transa, uplo, diag = 'N'; \
EIGTYPE *b; \
const EIGTYPE *a; \
BlasIndex m, n, lda, ldb; \
\
/* Set m, n */ \
m = convert_index<BlasIndex>(diagSize); \
n = convert_index<BlasIndex>(cols); \
\
/* Set trans */ \
transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
\
/* Set b, ldb */ \
Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \
MatrixX##EIGPREFIX b_tmp; \
\
if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
\
/* Set uplo */ \
uplo = IsLower ? 'L' : 'U'; \
if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
/* Set a, lda */ \
Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
MatrixLhs a_tmp; \
\
if ((conjA!=0) || (SetDiag==0)) { \
if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \
if (IsZeroDiag) \
a_tmp.diagonal().setZero(); \
else if (IsUnitDiag) \
a_tmp.diagonal().setOnes();\
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else { \
a = _lhs; \
lda = convert_index<BlasIndex>(lhsStride); \
} \
/*std::cout << "TRMM_L: A is square! Go to BLAS TRMM implementation! \n";*/ \
/* call ?trmm*/ \
BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
\
/* Add op(a_triangular)*b into res*/ \
Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
res_tmp=res_tmp+b_tmp; \
} \
};
EIGEN_BLAS_TRMM_L(double, double, d, d)
EIGEN_BLAS_TRMM_L(dcomplex, double, cd, z)
EIGEN_BLAS_TRMM_L(float, float, f, s)
EIGEN_BLAS_TRMM_L(scomplex, float, cf, c)
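// Editorial note: ?trmm overwrites its b argument with alpha*op(A)*b, which is why
// the macro above first copies the rhs into b_tmp, calls the BLAS routine on the
// copy, and then accumulates it into res (res_tmp = res_tmp + b_tmp), preserving
// the C += alpha*op(A)*B semantics. A hedged sketch of the generated call for double:
//   dtrmm_(&side, &uplo, &transa, &diag, &m, &n, &alpha, a, &lda, b, &ldb);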
// implements col-major += alpha * op(general) * op(triangular)
#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template <typename Index, int Mode, \
int LhsStorageOrder, bool ConjugateLhs, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper, \
conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \
}; \
\
static void run( \
Index _rows, Index _cols, Index _depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
{ \
Index diagSize = (std::min)(_cols,_depth); \
Index rows = _rows; \
Index depth = IsLower ? _depth : diagSize; \
Index cols = IsLower ? diagSize : _cols; \
\
typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
\
    /* Non-square case - does not fit BLAS ?TRMM; fall back to the default triangular product or call BLAS ?GEMM */ \
if (cols != depth) { \
\
int nthr = 1 /*mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS)*/; \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from BLAS*/ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
_rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
/*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
      /* Makes sense to call GEMM */ \
Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \
MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \
gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \
general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_R: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \
} \
return; \
} \
char side = 'R', transa, uplo, diag = 'N'; \
EIGTYPE *b; \
const EIGTYPE *a; \
BlasIndex m, n, lda, ldb; \
\
/* Set m, n */ \
m = convert_index<BlasIndex>(rows); \
n = convert_index<BlasIndex>(diagSize); \
\
/* Set trans */ \
transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
\
/* Set b, ldb */ \
Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
MatrixX##EIGPREFIX b_tmp; \
\
if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \
b = b_tmp.data(); \
ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \
\
/* Set uplo */ \
uplo = IsLower ? 'L' : 'U'; \
if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
/* Set a, lda */ \
Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \
MatrixRhs a_tmp; \
\
if ((conjA!=0) || (SetDiag==0)) { \
if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \
if (IsZeroDiag) \
a_tmp.diagonal().setZero(); \
else if (IsUnitDiag) \
a_tmp.diagonal().setOnes();\
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else { \
a = _rhs; \
lda = convert_index<BlasIndex>(rhsStride); \
} \
/*std::cout << "TRMM_R: A is square! Go to BLAS TRMM implementation! \n";*/ \
/* call ?trmm*/ \
BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \
\
/* Add op(a_triangular)*b into res*/ \
Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
res_tmp=res_tmp+b_tmp; \
} \
};
EIGEN_BLAS_TRMM_R(double, double, d, d)
EIGEN_BLAS_TRMM_R(dcomplex, double, cd, z)
EIGEN_BLAS_TRMM_R(float, float, f, s)
EIGEN_BLAS_TRMM_R(scomplex, float, cf, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H
abess-master/python/include/Eigen/src/Core/products/TriangularMatrixVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIANGULARMATRIXVECTOR_H
#define EIGEN_TRIANGULARMATRIXVECTOR_H
namespace Eigen {
namespace internal {
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version=Specialized>
struct triangular_matrix_vector_product;
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
enum {
IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
};
static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha);
};
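// The col-major implementation below walks the diagonal in panels of
// EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH columns: the small triangular block of each
// panel is applied with explicit segment updates, while the rectangular block
// below it (Lower) or above it (Upper) is delegated to the optimized
// general_matrix_vector_product kernel.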
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha)
{
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
Index size = (std::min)(_rows,_cols);
Index rows = IsLower ? _rows : (std::min)(_rows,_cols);
Index cols = IsLower ? (std::min)(_rows,_cols) : _cols;
typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
typedef Map<const Matrix<RhsScalar,Dynamic,1>, 0, InnerStride<> > RhsMap;
const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr));
typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;
ResMap res(_res,rows);
typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
for (Index pi=0; pi<size; pi+=PanelWidth)
{
Index actualPanelWidth = (std::min)(PanelWidth, size-pi);
for (Index k=0; k<actualPanelWidth; ++k)
{
Index i = pi + k;
Index s = IsLower ? ((HasUnitDiag||HasZeroDiag) ? i+1 : i ) : pi;
Index r = IsLower ? actualPanelWidth-k : k+1;
if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)
res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);
if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i);
}
Index r = IsLower ? rows - pi - actualPanelWidth : pi;
if (r>0)
{
Index s = IsLower ? pi+actualPanelWidth : 0;
general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(
r, actualPanelWidth,
LhsMapper(&lhs.coeffRef(s,pi), lhsStride),
RhsMapper(&rhs.coeffRef(pi), rhsIncr),
&res.coeffRef(s), resIncr, alpha);
}
}
if((!IsLower) && cols>size)
{
general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(
rows, cols-size,
LhsMapper(&lhs.coeffRef(0,size), lhsStride),
RhsMapper(&rhs.coeffRef(size), rhsIncr),
_res, resIncr, alpha);
}
}
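// Illustrative usage (not part of this file): this kernel is what the
// expression-template layer ends up calling for products such as
//   res.noalias() += mat.triangularView<Lower>() * vec;
// when mat is column-major; alpha carries any scalar factor extracted from the
// expression.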
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
{
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
enum {
IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
};
static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
};
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
{
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
Index diagSize = (std::min)(_rows,_cols);
Index rows = IsLower ? _rows : diagSize;
Index cols = IsLower ? diagSize : _cols;
typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
typedef Map<const Matrix<RhsScalar,Dynamic,1> > RhsMap;
const RhsMap rhs(_rhs,cols);
typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
typedef Map<Matrix<ResScalar,Dynamic,1>, 0, InnerStride<> > ResMap;
ResMap res(_res,rows,InnerStride<>(resIncr));
typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
for (Index pi=0; pi<diagSize; pi+=PanelWidth)
{
Index actualPanelWidth = (std::min)(PanelWidth, diagSize-pi);
for (Index k=0; k<actualPanelWidth; ++k)
{
Index i = pi + k;
Index s = IsLower ? pi : ((HasUnitDiag||HasZeroDiag) ? i+1 : i);
Index r = IsLower ? k+1 : actualPanelWidth-k;
if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)
res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();
if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i);
}
Index r = IsLower ? pi : cols - pi - actualPanelWidth;
if (r>0)
{
Index s = IsLower ? 0 : pi + actualPanelWidth;
general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(
actualPanelWidth, r,
LhsMapper(&lhs.coeffRef(pi,s), lhsStride),
RhsMapper(&rhs.coeffRef(s), rhsIncr),
&res.coeffRef(pi), resIncr, alpha);
}
}
if(IsLower && rows>diagSize)
{
general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(
rows-diagSize, cols,
LhsMapper(&lhs.coeffRef(diagSize,0), lhsStride),
RhsMapper(&rhs.coeffRef(0), rhsIncr),
&res.coeffRef(diagSize), resIncr, alpha);
}
}
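// Note: unlike the col-major variant, which scatters axpy-style column
// updates, the row-major variant above accumulates each res(i) as a dot
// product of the stored part of row i with the matching rhs segment; the
// rectangular remainder is again handed to general_matrix_vector_product.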
/***************************************************************************
* Wrapper to triangular_matrix_vector_product
***************************************************************************/
template<int Mode,int StorageOrder>
struct trmv_selector;
} // end namespace internal
namespace internal {
template<int Mode, typename Lhs, typename Rhs>
struct triangular_product_impl<Mode,true,Lhs,false,Rhs,true>
{
template<typename Dest> static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha)
{
eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols());
internal::trmv_selector<Mode,(int(internal::traits<Lhs>::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(lhs, rhs, dst, alpha);
}
};
template<int Mode, typename Lhs, typename Rhs>
struct triangular_product_impl<Mode,false,Lhs,true,Rhs,false>
{
template<typename Dest> static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha)
{
eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols());
Transpose<Dest> dstT(dst);
internal::trmv_selector<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? Upper : Lower),
(int(internal::traits<Rhs>::Flags)&RowMajorBit) ? ColMajor : RowMajor>
::run(rhs.transpose(),lhs.transpose(), dstT, alpha);
}
};
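// The specialization above handles vector * triangular-matrix products via the
// identity (x * T)^T = T^T * x^T: it transposes the destination, swaps the
// operands, and flips Lower/Upper (keeping the UnitDiag/ZeroDiag bits), so only
// the matrix * vector path needs a dedicated kernel.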
} // end namespace internal
namespace internal {
// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same.
template<int Mode> struct trmv_selector<Mode,ColMajor>
{
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;
typedef typename Dest::RealScalar RealScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
* RhsBlasTraits::extractScalarFactor(rhs);
enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
      // on the other hand, it is good for the cache to pack the vector anyway...
EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
};
gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
evalToDest ? dest.data() : static_dest.data());
if(!evalToDest)
{
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
Index size = dest.size();
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#endif
if(!alphaIsCompatible)
{
MappedDest(actualDestPtr, dest.size()).setZero();
compatibleAlpha = RhsScalar(1);
}
else
MappedDest(actualDestPtr, dest.size()) = dest;
}
internal::triangular_matrix_vector_product
<Index,Mode,
LhsScalar, LhsBlasTraits::NeedToConjugate,
RhsScalar, RhsBlasTraits::NeedToConjugate,
ColMajor>
::run(actualLhs.rows(),actualLhs.cols(),
actualLhs.data(),actualLhs.outerStride(),
actualRhs.data(),actualRhs.innerStride(),
actualDestPtr,1,compatibleAlpha);
if (!evalToDest)
{
if(!alphaIsCompatible)
dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
else
dest = MappedDest(actualDestPtr, dest.size());
}
}
};
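// Buffering summary for the selector above: the kernel writes into a temporary
// instead of dest whenever dest has a non-unit inner stride, or when alpha is
// genuinely complex while the product itself is complex-by-real
// (alphaIsCompatible==false); in that case the kernel runs with a unit factor
// and the single scaling by actualAlpha happens on the final accumulation.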
template<int Mode> struct trmv_selector<Mode,RowMajor>
{
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
* RhsBlasTraits::extractScalarFactor(rhs);
enum {
DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
};
gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
if(!DirectlyUseRhs)
{
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
Index size = actualRhs.size();
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#endif
Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
}
internal::triangular_matrix_vector_product
<Index,Mode,
LhsScalar, LhsBlasTraits::NeedToConjugate,
RhsScalar, RhsBlasTraits::NeedToConjugate,
RowMajor>
::run(actualLhs.rows(),actualLhs.cols(),
actualLhs.data(),actualLhs.outerStride(),
actualRhsPtr,1,
dest.data(),dest.innerStride(),
actualAlpha);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULARMATRIXVECTOR_H
| 14,216 | 41.186944 | 147 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
* Triangular matrix-vector product functionality based on ?TRMV.
********************************************************************************
*/
#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H
#define EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H
namespace Eigen {
namespace internal {
/**********************************************************************
* This file implements triangular matrix-vector multiplication using BLAS
**********************************************************************/
// trmv specialization
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
struct triangular_matrix_vector_product_trmv :
triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,StorageOrder,BuiltIn> {};
#define EIGEN_BLAS_TRMV_SPECIALIZE(Scalar) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \
static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
} \
}; \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
} \
};
EIGEN_BLAS_TRMV_SPECIALIZE(double)
EIGEN_BLAS_TRMV_SPECIALIZE(float)
EIGEN_BLAS_TRMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_TRMV_SPECIALIZE(scomplex)
// implements col-major: res += alpha * op(triangular) * vector
#define EIGEN_BLAS_TRMV_CM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor> { \
enum { \
IsLower = (Mode&Lower) == Lower, \
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper \
}; \
static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
{ \
if (ConjLhs || IsZeroDiag) { \
triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
return; \
}\
Index size = (std::min)(_rows,_cols); \
Index rows = IsLower ? _rows : size; \
Index cols = IsLower ? size : _cols; \
\
typedef VectorX##EIGPREFIX VectorRhs; \
EIGTYPE *x, *y;\
\
/* Set x*/ \
Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
VectorRhs x_tmp; \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
x = x_tmp.data(); \
\
/* Square part handling */\
\
char trans, uplo, diag; \
BlasIndex m, n, lda, incx, incy; \
EIGTYPE const *a; \
EIGTYPE beta(1); \
\
/* Set m, n */ \
n = convert_index<BlasIndex>(size); \
lda = convert_index<BlasIndex>(lhsStride); \
incx = 1; \
incy = convert_index<BlasIndex>(resIncr); \
\
/* Set uplo, trans and diag*/ \
trans = 'N'; \
uplo = IsLower ? 'L' : 'U'; \
diag = IsUnitDiag ? 'U' : 'N'; \
\
/* call ?TRMV*/ \
BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
\
    /* Add op(a_tr)*rhs into res */ \
BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
    /* Non-square case - doesn't fit BLAS ?TRMV. Fall back to the default triangular product */ \
if (size<(std::max)(rows,cols)) { \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
x = x_tmp.data(); \
if (size<rows) { \
y = _res + size*resIncr; \
a = _lhs + size; \
m = convert_index<BlasIndex>(rows-size); \
n = convert_index<BlasIndex>(size); \
} \
else { \
x += size; \
y = _res; \
a = _lhs + size*lda; \
m = convert_index<BlasIndex>(size); \
n = convert_index<BlasIndex>(cols-size); \
} \
BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \
} \
} \
};
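// Shape handling in the macro above (col-major, lower-triangular case): the
// leading size x size block T goes through ?TRMV + ?AXPY, and for a tall
// non-square operand the remaining rows are folded in with a single ?GEMV:
//   [ T ]     res[0:size]    += alpha * T * x
//   [ B ]     res[size:rows] += alpha * B * x
// (the wide upper case is symmetric, splitting the columns instead).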
EIGEN_BLAS_TRMV_CM(double, double, d, d)
EIGEN_BLAS_TRMV_CM(dcomplex, double, cd, z)
EIGEN_BLAS_TRMV_CM(float, float, f, s)
EIGEN_BLAS_TRMV_CM(scomplex, float, cf, c)
// implements row-major: res += alpha * op(triangular) * vector
#define EIGEN_BLAS_TRMV_RM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \
enum { \
IsLower = (Mode&Lower) == Lower, \
SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper \
}; \
static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
{ \
if (IsZeroDiag) { \
triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
return; \
}\
Index size = (std::min)(_rows,_cols); \
Index rows = IsLower ? _rows : size; \
Index cols = IsLower ? size : _cols; \
\
typedef VectorX##EIGPREFIX VectorRhs; \
EIGTYPE *x, *y;\
\
/* Set x*/ \
Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
VectorRhs x_tmp; \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
x = x_tmp.data(); \
\
/* Square part handling */\
\
char trans, uplo, diag; \
BlasIndex m, n, lda, incx, incy; \
EIGTYPE const *a; \
EIGTYPE beta(1); \
\
/* Set m, n */ \
n = convert_index<BlasIndex>(size); \
lda = convert_index<BlasIndex>(lhsStride); \
incx = 1; \
incy = convert_index<BlasIndex>(resIncr); \
\
/* Set uplo, trans and diag*/ \
trans = ConjLhs ? 'C' : 'T'; \
uplo = IsLower ? 'U' : 'L'; \
diag = IsUnitDiag ? 'U' : 'N'; \
\
/* call ?TRMV*/ \
BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \
\
    /* Add op(a_tr)*rhs into res */ \
BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \
    /* Non-square case - doesn't fit BLAS ?TRMV. Fall back to the default triangular product */ \
if (size<(std::max)(rows,cols)) { \
if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
x = x_tmp.data(); \
if (size<rows) { \
y = _res + size*resIncr; \
a = _lhs + size*lda; \
m = convert_index<BlasIndex>(rows-size); \
n = convert_index<BlasIndex>(size); \
} \
else { \
x += size; \
y = _res; \
a = _lhs + size; \
m = convert_index<BlasIndex>(size); \
n = convert_index<BlasIndex>(cols-size); \
} \
BLASPREFIX##gemv_(&trans, &n, &m, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \
} \
} \
};
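// Row-major note: Fortran BLAS is column-major, so the row-major matrix is
// handed to ?TRMV as its transpose -- trans becomes 'T' ('C' when conjugating
// the lhs) and uplo is flipped relative to the col-major macro above.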
EIGEN_BLAS_TRMV_RM(double, double, d, d)
EIGEN_BLAS_TRMV_RM(dcomplex, double, cd, z)
EIGEN_BLAS_TRMV_RM(float, float, f, s)
EIGEN_BLAS_TRMV_RM(scomplex, float, cf, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H
| 9,895 | 39.892562 | 166 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/products/TriangularSolverMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H
#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
namespace Eigen {
namespace internal {
// if the rhs is row major, let's transpose the product
template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
{
static void run(
Index size, Index cols,
const Scalar* tri, Index triStride,
Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
triangular_solve_matrix<
Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
(Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
NumTraits<Scalar>::IsComplex && Conjugate,
TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
::run(size, cols, tri, triStride, _other, otherStride, blocking);
}
};
/* Optimized triangular solver with multiple right-hand sides and the triangular matrix on the left
*/
template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
{
static EIGEN_DONT_INLINE void run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index cols = otherSize;
typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> TriMapper;
typedef blas_data_mapper<Scalar, Index, ColMajor> OtherMapper;
TriMapper tri(_tri, triStride);
OtherMapper other(_other, otherStride);
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
IsLower = (Mode&Lower) == Lower
};
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(size,blocking.mc()); // cache block size along the M direction
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
conj_if<Conjugate> conj;
gebp_kernel<Scalar, Scalar, Index, OtherMapper, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;
gemm_pack_rhs<Scalar, Index, OtherMapper, Traits::nr, ColMajor, false, true> pack_rhs;
  // the goal here is to subdivide the Rhs panels so that we keep some cache
// coherence when accessing the rhs elements
std::ptrdiff_t l1, l2, l3;
manage_caching_sizes(GetAction, &l1, &l2, &l3);
Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max<Index>(otherStride,size)) : 0;
subcols = std::max<Index>((subcols/Traits::nr)*Traits::nr, Traits::nr);
for(Index k2=IsLower ? 0 : size;
IsLower ? k2<size : k2>0;
IsLower ? k2+=kc : k2-=kc)
{
const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);
// We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
// and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
// A11 (the triangular part) and A21 the remaining rectangular part.
// Then the high level algorithm is:
// - B = R1 => general block copy (done during the next step)
// - R1 = A11^-1 B => tricky part
// - update B from the new R1 => actually this has to be performed continuously during the above step
// - R2 -= A21 * B => GEPP
// The tricky part: compute R1 = A11^-1 B while updating B from R1
// The idea is to split A11 into multiple small vertical panels.
// Each panel can be split into a small triangular part T1k which is processed without optimization,
// and the remaining small part T2k which is processed using gebp with appropriate block strides
for(Index j2=0; j2<cols; j2+=subcols)
{
Index actual_cols = (std::min)(cols-j2,subcols);
// for each small vertical panels [T1k^T, T2k^T]^T of lhs
for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
// tr solve
for (Index k=0; k<actualPanelWidth; ++k)
{
// TODO write a small kernel handling this (can be shared with trsv)
Index i = IsLower ? k2+k1+k : k2-k1-k-1;
Index rs = actualPanelWidth - k - 1; // remaining size
Index s = TriStorageOrder==RowMajor ? (IsLower ? k2+k1 : i+1)
: IsLower ? i+1 : i-rs;
Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
for (Index j=j2; j<j2+actual_cols; ++j)
{
if (TriStorageOrder==RowMajor)
{
Scalar b(0);
const Scalar* l = &tri(i,s);
Scalar* r = &other(s,j);
for (Index i3=0; i3<k; ++i3)
b += conj(l[i3]) * r[i3];
other(i,j) = (other(i,j) - b)*a;
}
else
{
Scalar b = (other(i,j) *= a);
Scalar* r = &other(s,j);
const Scalar* l = &tri(s,i);
for (Index i3=0;i3<rs;++i3)
r[i3] -= b * conj(l[i3]);
}
}
}
Index lengthTarget = actual_kc-k1-actualPanelWidth;
Index startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
Index blockBOffset = IsLower ? k1 : lengthTarget;
// update the respective rows of B from other
pack_rhs(blockB+actual_kc*j2, other.getSubMapper(startBlock,j2), actualPanelWidth, actual_cols, actual_kc, blockBOffset);
// GEBP
if (lengthTarget>0)
{
Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
pack_lhs(blockA, tri.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);
gebp_kernel(other.getSubMapper(startTarget,j2), blockA, blockB+actual_kc*j2, lengthTarget, actualPanelWidth, actual_cols, Scalar(-1),
actualPanelWidth, actual_kc, 0, blockBOffset);
}
}
}
// R2 -= A21 * B => GEPP
{
Index start = IsLower ? k2+kc : 0;
Index end = IsLower ? size : k2-kc;
for(Index i2=start; i2<end; i2+=mc)
{
const Index actual_mc = (std::min)(mc,end-i2);
if (actual_mc>0)
{
pack_lhs(blockA, tri.getSubMapper(i2, IsLower ? k2 : k2-kc), actual_kc, actual_mc);
gebp_kernel(other.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0);
}
}
}
}
}
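// Illustrative usage (not part of this file): this kernel backs in-place
// triangular solves with matrix right-hand sides, e.g.
//   A.triangularView<Lower>().solveInPlace(B);   // B <- A^-1 * B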
/* Optimized triangular solver with multiple left-hand sides and the triangular matrix on the right
*/
template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
{
static EIGEN_DONT_INLINE void run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking);
};
template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index rows = otherSize;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef blas_data_mapper<Scalar, Index, ColMajor> LhsMapper;
typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> RhsMapper;
LhsMapper lhs(_other, otherStride);
RhsMapper rhs(_tri, triStride);
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
RhsStorageOrder = TriStorageOrder,
SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
IsLower = (Mode&Lower) == Lower
};
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*size;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
conj_if<Conjugate> conj;
gebp_kernel<Scalar, Scalar, Index, LhsMapper, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder,false,true> pack_rhs_panel;
gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;
for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size;
IsLower ? k2-=kc : k2+=kc)
{
const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
Index startPanel = IsLower ? 0 : k2+actual_kc;
Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
Scalar* geb = blockB+actual_kc*actual_kc;
if (rs>0) pack_rhs(geb, rhs.getSubMapper(actual_k2,startPanel), actual_kc, rs);
// triangular packing (we only pack the panels off the diagonal,
    // neglecting the blocks overlapping the diagonal)
{
for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
Index actual_j2 = actual_k2 + j2;
Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
if (panelLength>0)
pack_rhs_panel(blockB+j2*actual_kc,
rhs.getSubMapper(actual_k2+panelOffset, actual_j2),
panelLength, actualPanelWidth,
actual_kc, panelOffset);
}
}
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(mc,rows-i2);
// triangular solver kernel
{
// for each small block of the diagonal (=> vertical panels of rhs)
for (Index j2 = IsLower
? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)
: Index(SmallPanelWidth)))
: 0;
IsLower ? j2>=0 : j2<actual_kc;
IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)
{
Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
Index absolute_j2 = actual_k2 + j2;
Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
// GEBP
if(panelLength>0)
{
gebp_kernel(lhs.getSubMapper(i2,absolute_j2),
blockA, blockB+j2*actual_kc,
actual_mc, panelLength, actualPanelWidth,
Scalar(-1),
actual_kc, actual_kc, // strides
panelOffset, panelOffset); // offsets
}
// unblocked triangular solve
for (Index k=0; k<actualPanelWidth; ++k)
{
Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
Scalar* r = &lhs(i2,j);
for (Index k3=0; k3<k; ++k3)
{
Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b;
}
if((Mode & UnitDiag)==0)
{
Scalar inv_rjj = RealScalar(1)/conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
r[i] *= inv_rjj;
}
}
// pack the just computed part of lhs to A
pack_lhs_panel(blockA, LhsMapper(_other+absolute_j2*otherStride+i2, otherStride),
actualPanelWidth, actual_mc,
actual_kc, j2);
}
}
if (rs>0)
gebp_kernel(lhs.getSubMapper(i2, startPanel), blockA, geb,
actual_mc, actual_kc, rs, Scalar(-1),
-1, -1, 0, 0);
}
}
}
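// The OnTheRight variant above solves X * op(T) = B in place (i.e. it forms
// B * op(T)^-1): off-diagonal panels of the triangular rhs are packed once,
// each small diagonal block is solved with an unblocked substitution loop, and
// gebp_kernel applies both the per-panel corrections and the trailing update.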
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H
| 13,979 | 40.607143 | 145 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to BLAS F77
 * Triangular matrix solve functionality based on ?TRSM.
********************************************************************************
*/
#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H
#define EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H
namespace Eigen {
namespace internal {
// implements LeftSide op(triangular)^-1 * general
#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASPREFIX) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
}; \
static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
{ \
BlasIndex m = convert_index<BlasIndex>(size), n = convert_index<BlasIndex>(otherSize), lda, ldb; \
char side = 'L', uplo, diag='N', transa; \
/* Set alpha_ */ \
EIGTYPE alpha(1); \
ldb = convert_index<BlasIndex>(otherStride);\
\
const EIGTYPE *a; \
/* Set trans */ \
transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
/* Set uplo */ \
uplo = IsLower ? 'L' : 'U'; \
if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
/* Set a, lda */ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
MatrixTri a_tmp; \
\
if (conjA) { \
a_tmp = tri.conjugate(); \
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else { \
a = _tri; \
lda = convert_index<BlasIndex>(triStride); \
} \
if (IsUnitDiag) diag='U'; \
/* call ?trsm*/ \
BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
} \
};
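// Note: ?trsm overwrites _other in place with op(tri)^-1 * _other, so no
// temporary is needed for the result; the level3_blocking argument is ignored
// because the BLAS library performs its own internal blocking.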
EIGEN_BLAS_TRSM_L(double, double, d)
EIGEN_BLAS_TRSM_L(dcomplex, double, z)
EIGEN_BLAS_TRSM_L(float, float, s)
EIGEN_BLAS_TRSM_L(scomplex, float, c)
// implements RightSide general * op(triangular)^-1
#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASPREFIX) \
template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \
{ \
enum { \
IsLower = (Mode&Lower) == Lower, \
IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
}; \
static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
{ \
BlasIndex m = convert_index<BlasIndex>(otherSize), n = convert_index<BlasIndex>(size), lda, ldb; \
char side = 'R', uplo, diag='N', transa; \
/* Set alpha_ */ \
EIGTYPE alpha(1); \
ldb = convert_index<BlasIndex>(otherStride);\
\
const EIGTYPE *a; \
/* Set trans */ \
transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
/* Set uplo */ \
uplo = IsLower ? 'L' : 'U'; \
if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
/* Set a, lda */ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
MatrixTri a_tmp; \
\
if (conjA) { \
a_tmp = tri.conjugate(); \
a = a_tmp.data(); \
lda = convert_index<BlasIndex>(a_tmp.outerStride()); \
} else { \
a = _tri; \
lda = convert_index<BlasIndex>(triStride); \
} \
if (IsUnitDiag) diag='U'; \
/* call ?trsm*/ \
BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \
/*std::cout << "TRMS_L specialization!\n";*/ \
} \
};
EIGEN_BLAS_TRSM_R(double, double, d)
EIGEN_BLAS_TRSM_R(dcomplex, double, z)
EIGEN_BLAS_TRSM_R(float, float, s)
EIGEN_BLAS_TRSM_R(scomplex, float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H
| 6,024 | 38.638158 | 138 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/products/TriangularSolverVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H
#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H
namespace Eigen {
namespace internal {
template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate, int StorageOrder>
struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder>
{
static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
{
triangular_solve_vector<LhsScalar,RhsScalar,Index,OnTheLeft,
((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
Conjugate,StorageOrder==RowMajor?ColMajor:RowMajor
>::run(size, _lhs, lhsStride, rhs);
}
};
// forward and backward substitution, row-major, rhs is a vector
template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor>
{
enum {
IsLower = ((Mode&Lower)==Lower)
};
static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
{
typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
typename internal::conditional<
Conjugate,
const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
const LhsMap&>
::type cjLhs(lhs);
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{
Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
Index r = IsLower ? pi : size - pi; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
        // 2 - it is slightly faster at runtime
Index startRow = IsLower ? pi : pi-actualPanelWidth;
Index startCol = IsLower ? 0 : pi;
general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
actualPanelWidth, r,
LhsMapper(&lhs.coeffRef(startRow,startCol), lhsStride),
RhsMapper(rhs + startCol, 1),
rhs + startRow, 1,
RhsScalar(-1));
}
for(Index k=0; k<actualPanelWidth; ++k)
{
Index i = IsLower ? pi+k : pi-k-1;
Index s = IsLower ? pi : i+1;
if (k>0)
rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
if(!(Mode & UnitDiag))
rhs[i] /= cjLhs(i,i);
}
}
}
};
// forward and backward substitution, column-major, rhs is a vector
template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor>
{
enum {
IsLower = ((Mode&Lower)==Lower)
};
static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
{
typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
typename internal::conditional<Conjugate,
const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
const LhsMap&
>::type cjLhs(lhs);
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{
Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
Index startBlock = IsLower ? pi : pi-actualPanelWidth;
Index endBlock = IsLower ? pi + actualPanelWidth : 0;
for(Index k=0; k<actualPanelWidth; ++k)
{
Index i = IsLower ? pi+k : pi-k-1;
if(!(Mode & UnitDiag))
rhs[i] /= cjLhs.coeff(i,i);
Index r = actualPanelWidth - k - 1; // remaining size
Index s = IsLower ? i+1 : i-r;
if (r>0)
Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
}
Index r = IsLower ? size - endBlock : startBlock; // remaining size
if (r > 0)
{
// let's directly call the low level product function because:
// 1 - it is faster to compile
        // 2 - it is slightly faster at runtime
general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
r, actualPanelWidth,
LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride),
RhsMapper(rhs+startBlock, 1),
rhs+endBlock, 1, RhsScalar(-1));
}
}
}
};
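// Both vector solvers above are blocked forward/backward substitution, i.e.
// (lower case shown; the upper case is mirrored)
//   x(i) = ( b(i) - sum_{j already solved} L(i,j) * x(j) ) / L(i,i),
// organized in PanelWidth panels so that the bulk of the updates runs through
// the cache-friendly general_matrix_vector_product kernel.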
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H
| 5,741 | 38.328767 | 129 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/BlasUtil.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BLASUTIL_H
#define EIGEN_BLASUTIL_H
// This file contains many lightweight helper classes used to
// implement and control fast level 2 and level 3 BLAS-like routines.
namespace Eigen {
namespace internal {
// forward declarations
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs=false, bool ConjugateRhs=false>
struct gebp_kernel;
template<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
struct gemm_pack_rhs;
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct gemm_pack_lhs;
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder>
struct general_matrix_matrix_product;
template<typename Index,
typename LhsScalar, typename LhsMapper, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version=Specialized>
struct general_matrix_vector_product;
template<bool Conjugate> struct conj_if;
template<> struct conj_if<true> {
template<typename T>
inline T operator()(const T& x) const { return numext::conj(x); }
template<typename T>
inline T pconj(const T& x) const { return internal::pconj(x); }
};
template<> struct conj_if<false> {
template<typename T>
inline const T& operator()(const T& x) const { return x; }
template<typename T>
inline const T& pconj(const T& x) const { return x; }
};
// Generic implementation for custom complex types.
template<typename LhsScalar, typename RhsScalar, bool ConjLhs, bool ConjRhs>
struct conj_helper
{
typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar>::ReturnType Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const
{ return padd(c, pmul(x,y)); }
EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const
{ return conj_if<ConjLhs>()(x) * conj_if<ConjRhs>()(y); }
};
template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
{
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }
};
template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>
{
typedef std::complex<RealScalar> Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
};
template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
{
typedef std::complex<RealScalar> Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};
template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
{
typedef std::complex<RealScalar> Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};
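// The three complex specializations above expand x*conj(y), conj(x)*y and
// conj(x)*conj(y) by hand, so a conjugated complex multiply costs the same
// four real multiplies and two additions as a plain complex multiply.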
template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
{
typedef std::complex<RealScalar> Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const
{ return padd(c, pmul(x,y)); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const
{ return conj_if<Conj>()(x)*y; }
};
template<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>
{
typedef std::complex<RealScalar> Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const
{ return padd(c, pmul(x,y)); }
EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const
{ return x*conj_if<Conj>()(y); }
};
template<typename From,typename To> struct get_factor {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return To(x); }
};
template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }
};
template<typename Scalar, typename Index>
class BlasVectorMapper {
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
return m_data[i];
}
template <typename Packet, int AlignmentType>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet load(Index i) const {
return ploadt<Packet, AlignmentType>(m_data + i);
}
template <typename Packet>
EIGEN_DEVICE_FUNC bool aligned(Index i) const {
return (UIntPtr(m_data+i)%sizeof(Packet))==0;
}
protected:
Scalar* m_data;
};
template<typename Scalar, typename Index, int AlignmentType>
class BlasLinearMapper {
public:
typedef typename packet_traits<Scalar>::type Packet;
typedef typename packet_traits<Scalar>::half HalfPacket;
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {
internal::prefetch(&operator()(i));
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const {
return m_data[i];
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
return ploadt<Packet, AlignmentType>(m_data + i);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
return ploadt<HalfPacket, AlignmentType>(m_data + i);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const {
pstoret<Scalar, Packet, AlignmentType>(m_data + i, p);
}
protected:
Scalar *m_data;
};
// Lightweight helper class to access matrix coefficients.
template<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned>
class blas_data_mapper {
public:
typedef typename packet_traits<Scalar>::type Packet;
typedef typename packet_traits<Scalar>::half HalfPacket;
typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
typedef BlasVectorMapper<Scalar, Index> VectorMapper;
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>
getSubMapper(Index i, Index j) const {
return blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>(&operator()(i, j), m_stride);
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
return LinearMapper(&operator()(i, j));
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
return VectorMapper(&operator()(i, j));
}
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const {
return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
return ploadt<Packet, AlignmentType>(&operator()(i, j));
}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));
}
template<typename SubPacket>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {
pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
}
template<typename SubPacket>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const {
return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);
}
EIGEN_DEVICE_FUNC const Index stride() const { return m_stride; }
EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }
EIGEN_DEVICE_FUNC Index firstAligned(Index size) const {
if (UIntPtr(m_data)%sizeof(Scalar)) {
return -1;
}
return internal::first_default_aligned(m_data, size);
}
protected:
Scalar* EIGEN_RESTRICT m_data;
const Index m_stride;
};
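// blas_data_mapper is the main interface through which the product kernels
// touch matrix memory: scalar (i,j) access, packet loads/stores, strided
// scatter/gather, and getSubMapper for blocking, so one kernel implementation
// serves both storage orders and arbitrary outer strides.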
// lightweight helper class to access matrix coefficients (const version)
template<typename Scalar, typename Index, int StorageOrder>
class const_blas_data_mapper : public blas_data_mapper<const Scalar, Index, StorageOrder> {
public:
EIGEN_ALWAYS_INLINE const_blas_data_mapper(const Scalar *data, Index stride) : blas_data_mapper<const Scalar, Index, StorageOrder>(data, stride) {}
EIGEN_ALWAYS_INLINE const_blas_data_mapper<Scalar, Index, StorageOrder> getSubMapper(Index i, Index j) const {
return const_blas_data_mapper<Scalar, Index, StorageOrder>(&(this->operator()(i, j)), this->m_stride);
}
};
/* Helper class to analyze the factors of a Product expression.
 * In particular it allows one to pop out negation (operator-), scalar multiples,
 * and conjugation */
template<typename XprType> struct blas_traits
{
typedef typename traits<XprType>::Scalar Scalar;
typedef const XprType& ExtractType;
typedef XprType _ExtractType;
enum {
IsComplex = NumTraits<Scalar>::IsComplex,
IsTransposed = false,
NeedToConjugate = false,
HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
&& ( bool(XprType::IsVectorAtCompileTime)
|| int(inner_stride_at_compile_time<XprType>::ret) == 1)
) ? 1 : 0
};
typedef typename conditional<bool(HasUsableDirectAccess),
ExtractType,
typename _ExtractType::PlainObject
>::type DirectLinearAccessType;
static inline ExtractType extract(const XprType& x) { return x; }
static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
};
// pop conjugate
template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
{
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
enum {
IsComplex = NumTraits<Scalar>::IsComplex,
NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
};
static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }
};
// pop scalar multiple
template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> >
: blas_traits<NestedXpr>
{
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
static inline ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }
static inline Scalar extractScalarFactor(const XprType& x)
{ return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); }
};
template<typename Scalar, typename NestedXpr, typename Plain>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > >
: blas_traits<NestedXpr>
{
typedef blas_traits<NestedXpr> Base;
typedef CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > XprType;
typedef typename Base::ExtractType ExtractType;
static inline ExtractType extract(const XprType& x) { return Base::extract(x.lhs()); }
static inline Scalar extractScalarFactor(const XprType& x)
{ return Base::extractScalarFactor(x.lhs()) * x.rhs().functor().m_other; }
};
template<typename Scalar, typename Plain1, typename Plain2>
struct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1>,
const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain2> > >
: blas_traits<CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1> >
{};
// pop opposite
template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
{
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x)
{ return - Base::extractScalarFactor(x.nestedExpression()); }
};
// pop/push transpose
template<typename NestedXpr>
struct blas_traits<Transpose<NestedXpr> >
: blas_traits<NestedXpr>
{
typedef typename NestedXpr::Scalar Scalar;
typedef blas_traits<NestedXpr> Base;
typedef Transpose<NestedXpr> XprType;
typedef Transpose<const typename Base::_ExtractType> ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
typedef Transpose<const typename Base::_ExtractType> _ExtractType;
typedef typename conditional<bool(Base::HasUsableDirectAccess),
ExtractType,
typename ExtractType::PlainObject
>::type DirectLinearAccessType;
enum {
IsTransposed = Base::IsTransposed ? 0 : 1
};
static inline ExtractType extract(const XprType& x) { return ExtractType(Base::extract(x.nestedExpression())); }
static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
};
template<typename T>
struct blas_traits<const T>
: blas_traits<T>
{};
template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
struct extract_data_selector {
static const typename T::Scalar* run(const T& m)
{
return blas_traits<T>::extract(m).data();
}
};
template<typename T>
struct extract_data_selector<T,false> {
static typename T::Scalar* run(const T&) { return 0; }
};
template<typename T> const typename T::Scalar* extract_data(const T& m)
{
return extract_data_selector<T>::run(m);
}
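// A minimal illustration (hypothetical XprType and x, not part of the API):
// for an expression such as 2.0*m.conjugate(), blas_traits strips the scalar
// multiple and the conjugation so a BLAS-like kernel can work on m's data directly:
//
//   typedef internal::blas_traits<XprType> Traits; // XprType: type of 2.0*m.conjugate()
//   Traits::extractScalarFactor(x);                // returns 2.0
//   Traits::extract(x);                            // refers to the plain matrix m
//   // Traits::NeedToConjugate is 1 for complex scalars, so the kernel is
//   // instantiated with conjugation enabled instead of materializing conj(m).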
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BLASUTIL_H
| 15,722 | 38.406015 | 156 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/Constants.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2007-2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONSTANTS_H
#define EIGEN_CONSTANTS_H
namespace Eigen {
/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
* stored in some runtime variable.
*
* Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
*/
const int Dynamic = -1;
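// For instance, the familiar MatrixXd typedef amounts to:
//
//   typedef Matrix<double, Dynamic, Dynamic> MatrixXd;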
/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
* has to be specified at runtime.
*/
const int DynamicIndex = 0xffffff;
/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
* The value Infinity there means the L-infinity norm.
*/
const int Infinity = -1;
/** This value means that the cost to evaluate an expression coefficient is either very expensive or
* cannot be known at compile time.
*
 * This value has to be positive to (1) simplify cost computation, and (2) allow one to distinguish between a very expensive and an extremely expensive expression.
* It thus must also be large enough to make sure unrolling won't happen and that sub expressions will be evaluated, but not too large to avoid overflow.
*/
const int HugeCost = 10000;
/** \defgroup flags Flags
* \ingroup Core_Module
*
* These are the possible bits which can be OR'ed to constitute the flags of a matrix or
* expression.
*
* It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
* an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any
* runtime overhead.
*
* \sa MatrixBase::Flags
*/
/** \ingroup flags
*
* for a matrix, this means that the storage order is row-major.
* If this bit is not set, the storage order is column-major.
* For an expression, this determines the storage order of
* the matrix created by evaluation of that expression.
* \sa \blank \ref TopicStorageOrders */
const unsigned int RowMajorBit = 0x1;
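// Illustrative compile-time check (not part of this header):
//
//   enum { IsRM = int(Matrix<float,2,2,RowMajor>::Flags) & RowMajorBit }; // IsRM == 1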
/** \ingroup flags
* means the expression should be evaluated by the calling expression */
const unsigned int EvalBeforeNestingBit = 0x2;
/** \ingroup flags
* \deprecated
* means the expression should be evaluated before any assignment */
EIGEN_DEPRECATED
const unsigned int EvalBeforeAssigningBit = 0x4; // FIXME deprecated
/** \ingroup flags
*
* Short version: means the expression might be vectorized
*
* Long version: means that the coefficients can be handled by packets
* and start at a memory location whose alignment meets the requirements
* of the present CPU architecture for optimized packet access. In the fixed-size
* case, there is the additional condition that it be possible to access all the
* coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
* and that any nontrivial strides don't break the alignment). In the dynamic-size case,
* there is no such condition on the total size and strides, so it might not be possible to access
* all coeffs by packets.
*
* \note This bit can be set regardless of whether vectorization is actually enabled.
* To check for actual vectorizability, see \a ActualPacketAccessBit.
*/
const unsigned int PacketAccessBit = 0x8;
#ifdef EIGEN_VECTORIZE
/** \ingroup flags
*
* If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
* is set to the value \a PacketAccessBit.
*
* If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
* is set to the value 0.
*/
const unsigned int ActualPacketAccessBit = PacketAccessBit;
#else
const unsigned int ActualPacketAccessBit = 0x0;
#endif
/** \ingroup flags
*
* Short version: means the expression can be seen as 1D vector.
*
* Long version: means that one can access the coefficients
 * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
 * index-based access methods are guaranteed
* to not have to do any runtime computation of a (row, col)-pair from the index, so that it
* is guaranteed that whenever it is available, index-based access is at least as fast as
* (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
*
* If both PacketAccessBit and LinearAccessBit are set, then the
 * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
 * lvalue expression.
*
* Typically, all vector expressions have the LinearAccessBit, but there is one exception:
* Product expressions don't have it, because it would be troublesome for vectorization, even when the
* Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
* not index-based packet access, so they don't have the LinearAccessBit.
*/
const unsigned int LinearAccessBit = 0x10;
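// For instance, an expression with the LinearAccessBit can be read with a single
// index, sketched as:
//
//   VectorXf v(10);
//   float x = v.coeff(3); // no (row,col) pair needs to be computed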
/** \ingroup flags
*
* Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
* This rules out read-only expressions.
*
 * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
 * the other:
* \li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit
* \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
*
* Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
*/
const unsigned int LvalueBit = 0x20;
/** \ingroup flags
*
* Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
* of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
* outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
* though referencable, do not have such a regular memory layout.
*
* See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
*/
const unsigned int DirectAccessBit = 0x40;
/** \deprecated \ingroup flags
*
* means the first coefficient packet is guaranteed to be aligned.
 * An expression cannot have the AlignedBit without the PacketAccessBit flag.
 * In other words, this means we are allowed to perform an aligned packet access to the first element regardless
* of the expression kind:
* \code
* expression.packet<Aligned>(0);
* \endcode
*/
EIGEN_DEPRECATED const unsigned int AlignedBit = 0x80;
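/** \ingroup flags
  *
  * means the expression should be nested by reference (as opposed to by value)
  * when used as a sub-expression of a larger expression. */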
const unsigned int NestByRefBit = 0x100;
/** \ingroup flags
*
* for an expression, this means that the storage order
* can be either row-major or column-major.
* The precise choice will be decided at evaluation time or when
* combined with other expressions.
* \sa \blank \ref RowMajorBit, \ref TopicStorageOrders */
const unsigned int NoPreferredStorageOrderBit = 0x200;
/** \ingroup flags
*
* Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed storage format,
* that is, the expression provides:
* \code
inline const Scalar* valuePtr() const;
inline const Index* innerIndexPtr() const;
inline const Index* outerIndexPtr() const;
inline const Index* innerNonZeroPtr() const;
\endcode
*/
const unsigned int CompressedAccessBit = 0x400;
// list of flags that are inherited by default
const unsigned int HereditaryBits = RowMajorBit
| EvalBeforeNestingBit;
/** \defgroup enums Enumerations
* \ingroup Core_Module
*
* Various enumerations used in %Eigen. Many of these are used as template parameters.
*/
/** \ingroup enums
* Enum containing possible values for the \c Mode or \c UpLo parameter of
* MatrixBase::selfadjointView() and MatrixBase::triangularView(), and selfadjoint solvers. */
enum UpLoType {
/** View matrix as a lower triangular matrix. */
Lower=0x1,
/** View matrix as an upper triangular matrix. */
Upper=0x2,
/** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
UnitDiag=0x4,
/** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
ZeroDiag=0x8,
/** View matrix as a lower triangular matrix with ones on the diagonal. */
UnitLower=UnitDiag|Lower,
/** View matrix as an upper triangular matrix with ones on the diagonal. */
UnitUpper=UnitDiag|Upper,
/** View matrix as a lower triangular matrix with zeros on the diagonal. */
StrictlyLower=ZeroDiag|Lower,
/** View matrix as an upper triangular matrix with zeros on the diagonal. */
StrictlyUpper=ZeroDiag|Upper,
/** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
SelfAdjoint=0x10,
/** Used to support symmetric, non-selfadjoint, complex matrices. */
Symmetric=0x20
};
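// Typical usage sketch (illustrative only):
//
//   MatrixXd A(3,3);
//   A.triangularView<StrictlyUpper>().setZero();   // zero the part above the diagonal
//   MatrixXd L = A.triangularView<UnitLower>();    // lower part, ones on the diagonal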
/** \ingroup enums
* Enum for indicating whether a buffer is aligned or not. */
enum AlignmentType {
Unaligned=0, /**< Data pointer has no specific alignment. */
Aligned8=8, /**< Data pointer is aligned on a 8 bytes boundary. */
Aligned16=16, /**< Data pointer is aligned on a 16 bytes boundary. */
Aligned32=32, /**< Data pointer is aligned on a 32 bytes boundary. */
Aligned64=64, /**< Data pointer is aligned on a 64 bytes boundary. */
Aligned128=128, /**< Data pointer is aligned on a 128 bytes boundary. */
AlignedMask=255,
Aligned=16, /**< \deprecated Synonym for Aligned16. */
#if EIGEN_MAX_ALIGN_BYTES==128
AlignedMax = Aligned128
#elif EIGEN_MAX_ALIGN_BYTES==64
AlignedMax = Aligned64
#elif EIGEN_MAX_ALIGN_BYTES==32
AlignedMax = Aligned32
#elif EIGEN_MAX_ALIGN_BYTES==16
AlignedMax = Aligned16
#elif EIGEN_MAX_ALIGN_BYTES==8
AlignedMax = Aligned8
#elif EIGEN_MAX_ALIGN_BYTES==0
AlignedMax = Unaligned
#else
#error Invalid value for EIGEN_MAX_ALIGN_BYTES
#endif
};
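// Typical usage sketch with Map (illustrative; data is assumed to really be
// aligned on a 16 bytes boundary):
//
//   float* data = ...;
//   Map<Matrix4f, Aligned16> m(data);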
/** \ingroup enums
* Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox
// TODO: find out what to do with that. Adapt the AlignedBox API ?
enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
/** \ingroup enums
* Enum containing possible values for the \p Direction parameter of
* Reverse, PartialReduxExpr and VectorwiseOp. */
enum DirectionType {
/** For Reverse, all columns are reversed;
* for PartialReduxExpr and VectorwiseOp, act on columns. */
Vertical,
/** For Reverse, all rows are reversed;
* for PartialReduxExpr and VectorwiseOp, act on rows. */
Horizontal,
/** For Reverse, both rows and columns are reversed;
* not used for PartialReduxExpr and VectorwiseOp. */
BothDirections
};
/** \internal \ingroup enums
* Enum to specify how to traverse the entries of a matrix. */
enum TraversalType {
/** \internal Default traversal, no vectorization, no index-based access */
DefaultTraversal,
/** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
LinearTraversal,
/** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
* and good size */
InnerVectorizedTraversal,
/** \internal Vectorization path using a single loop plus scalar loops for the
* unaligned boundaries */
LinearVectorizedTraversal,
/** \internal Generic vectorization path using one vectorized loop per row/column with some
* scalar loops to handle the unaligned boundaries */
SliceVectorizedTraversal,
/** \internal Special case to properly handle incompatible scalar types or other defecting cases*/
InvalidTraversal,
/** \internal Evaluate all entries at once */
AllAtOnceTraversal
};
/** \internal \ingroup enums
* Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
enum UnrollingType {
/** \internal Do not unroll loops. */
NoUnrolling,
/** \internal Unroll only the inner loop, but not the outer loop. */
InnerUnrolling,
/** \internal Unroll both the inner and the outer loop. If there is only one loop,
* because linear traversal is used, then unroll that loop. */
CompleteUnrolling
};
/** \internal \ingroup enums
* Enum to specify whether to use the default (built-in) implementation or the specialization. */
enum SpecializedType {
Specialized,
BuiltIn
};
/** \ingroup enums
* Enum containing possible values for the \p _Options template parameter of
* Matrix, Array and BandMatrix. */
enum StorageOptions {
/** Storage order is column major (see \ref TopicStorageOrders). */
ColMajor = 0,
/** Storage order is row major (see \ref TopicStorageOrders). */
RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
/** Align the matrix itself if it is vectorizable fixed-size */
AutoAlign = 0,
/** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
DontAlign = 0x2
};
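// For example (illustrative):
//
//   Matrix<double, 3, 3, RowMajor> r;             // fixed-size, row-major storage
//   Matrix<float, Dynamic, Dynamic, ColMajor> c;  // same layout as the default MatrixXf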
/** \ingroup enums
* Enum for specifying whether to apply or solve on the left or right. */
enum SideType {
/** Apply transformation on the left. */
OnTheLeft = 1,
/** Apply transformation on the right. */
OnTheRight = 2
};
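// For instance, triangular solves can be applied from either side, sketched as:
//
//   A.triangularView<Lower>().solveInPlace<OnTheLeft>(B);  // B <- A^{-1} * B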
/* the following used to be written as:
*
* struct NoChange_t {};
* namespace {
* EIGEN_UNUSED NoChange_t NoChange;
* }
*
 * on the grounds that it feels dangerous to disambiguate overloaded functions on enum/integer types.
* However, this leads to "variable declared but never referenced" warnings on Intel Composer XE,
* and we do not know how to get rid of them (bug 450).
*/
enum NoChange_t { NoChange };
enum Sequential_t { Sequential };
enum Default_t { Default };
/** \internal \ingroup enums
* Used in AmbiVector. */
enum AmbiVectorMode {
IsDense = 0,
IsSparse
};
/** \ingroup enums
* Used as template parameter in DenseCoeffBase and MapBase to indicate
* which accessors should be provided. */
enum AccessorLevels {
/** Read-only access via a member function. */
ReadOnlyAccessors,
/** Read/write access via member functions. */
WriteAccessors,
/** Direct read-only access to the coefficients. */
DirectAccessors,
/** Direct read/write access to the coefficients. */
DirectWriteAccessors
};
/** \ingroup enums
* Enum with options to give to various decompositions. */
enum DecompositionOptions {
/** \internal Not used (meant for LDLT?). */
Pivoting = 0x01,
/** \internal Not used (meant for LDLT?). */
NoPivoting = 0x02,
/** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
ComputeFullU = 0x04,
/** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
ComputeThinU = 0x08,
/** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
ComputeFullV = 0x10,
/** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
ComputeThinV = 0x20,
/** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
* that only the eigenvalues are to be computed and not the eigenvectors. */
EigenvaluesOnly = 0x40,
/** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
* that both the eigenvalues and the eigenvectors are to be computed. */
ComputeEigenvectors = 0x80,
/** \internal */
EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
/** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
* solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
Ax_lBx = 0x100,
/** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
* solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
ABx_lx = 0x200,
/** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
* solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
BAx_lx = 0x400,
/** \internal */
GenEigMask = Ax_lBx | ABx_lx | BAx_lx
};
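// Typical usage sketch (illustrative only):
//
//   JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
//   SelfAdjointEigenSolver<MatrixXd> es(A, EigenvaluesOnly);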
/** \ingroup enums
* Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
enum QRPreconditioners {
/** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
NoQRPreconditioner,
/** Use a QR decomposition without pivoting as the first step. */
HouseholderQRPreconditioner,
/** Use a QR decomposition with column pivoting as the first step. */
ColPivHouseholderQRPreconditioner,
/** Use a QR decomposition with full pivoting as the first step. */
FullPivHouseholderQRPreconditioner
};
#ifdef Success
#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
#endif
/** \ingroup enums
* Enum for reporting the status of a computation. */
enum ComputationInfo {
/** Computation was successful. */
Success = 0,
/** The provided data did not satisfy the prerequisites. */
NumericalIssue = 1,
/** Iterative procedure did not converge. */
NoConvergence = 2,
/** The inputs are invalid, or the algorithm has been improperly called.
* When assertions are enabled, such errors trigger an assert. */
InvalidInput = 3
};
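// Typical usage sketch (illustrative only):
//
//   LLT<MatrixXd> llt(A);
//   if (llt.info() != Success) {
//     // A was not positive definite; handle the failure
//   }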
/** \ingroup enums
* Enum used to specify how a particular transformation is stored in a matrix.
* \sa Transform, Hyperplane::transform(). */
enum TransformTraits {
/** Transformation is an isometry. */
Isometry = 0x1,
/** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is
* assumed to be [0 ... 0 1]. */
Affine = 0x2,
/** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
AffineCompact = 0x10 | Affine,
/** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
Projective = 0x20
};
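// For example, as the Mode parameter of Transform (illustrative):
//
//   Transform<float, 3, Affine> t;         // stored as a 4x4 matrix, last row [0 0 0 1]
//   Transform<float, 3, AffineCompact> tc; // stored as a 3x4 matrix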
/** \internal \ingroup enums
* Enum used to choose between implementation depending on the computer architecture. */
namespace Architecture
{
enum Type {
Generic = 0x0,
SSE = 0x1,
AltiVec = 0x2,
VSX = 0x3,
NEON = 0x4,
#if defined EIGEN_VECTORIZE_SSE
Target = SSE
#elif defined EIGEN_VECTORIZE_ALTIVEC
Target = AltiVec
#elif defined EIGEN_VECTORIZE_VSX
Target = VSX
#elif defined EIGEN_VECTORIZE_NEON
Target = NEON
#else
Target = Generic
#endif
};
}
/** \internal \ingroup enums
* Enum used as template parameter in Product and product evaluators. */
enum ProductImplType
{ DefaultProduct=0, LazyProduct, AliasFreeProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
/** \internal \ingroup enums
* Enum used in experimental parallel implementation. */
enum Action {GetAction, SetAction};
/** The type used to identify a dense storage. */
struct Dense {};
/** The type used to identify a general sparse storage. */
struct Sparse {};
/** The type used to identify a general solver (factored) storage. */
struct SolverStorage {};
/** The type used to identify a permutation storage. */
struct PermutationStorage {};
/** The type used to identify a permutation storage. */
struct TranspositionsStorage {};
/** The type used to identify a matrix expression */
struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};
// An evaluator must define its shape. By default, it can be one of the following:
struct DenseShape { static std::string debugName() { return "DenseShape"; } };
struct SolverShape { static std::string debugName() { return "SolverShape"; } };
struct HomogeneousShape { static std::string debugName() { return "HomogeneousShape"; } };
struct DiagonalShape { static std::string debugName() { return "DiagonalShape"; } };
struct BandShape { static std::string debugName() { return "BandShape"; } };
struct TriangularShape { static std::string debugName() { return "TriangularShape"; } };
struct SelfAdjointShape { static std::string debugName() { return "SelfAdjointShape"; } };
struct PermutationShape { static std::string debugName() { return "PermutationShape"; } };
struct TranspositionsShape { static std::string debugName() { return "TranspositionsShape"; } };
struct SparseShape { static std::string debugName() { return "SparseShape"; } };
namespace internal {
// random access iterators based on coeff*() accessors.
struct IndexBased {};
// evaluator based on iterators to access coefficients.
struct IteratorBased {};
/** \internal
* Constants for comparison functors
*/
enum ComparisonName {
cmp_EQ = 0,
cmp_LT = 1,
cmp_LE = 2,
cmp_UNORD = 3,
cmp_NEQ = 4,
cmp_GT = 5,
cmp_GE = 6
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CONSTANTS_H
| 21,579 | 38.379562 | 181 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/DisableStupidWarnings.h
|
#ifndef EIGEN_WARNINGS_DISABLED
#define EIGEN_WARNINGS_DISABLED
#ifdef _MSC_VER
// 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
// 4101 - unreferenced local variable
// 4127 - conditional expression is constant
// 4181 - qualifier applied to reference type ignored
// 4211 - nonstandard extension used : redefined extern to static
// 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data
// 4273 - QtAlignedMalloc, inconsistent DLL linkage
// 4324 - structure was padded due to declspec(align())
// 4503 - decorated name length exceeded, name was truncated
// 4512 - assignment operator could not be generated
// 4522 - 'class' : multiple assignment operators specified
// 4700 - uninitialized local variable 'xyz' used
// 4714 - function marked as __forceinline not inlined
// 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow
// 4800 - 'type' : forcing value to bool 'true' or 'false' (performance warning)
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma warning( push )
#endif
#pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)
#elif defined __INTEL_COMPILER
// 2196 - routine is both "inline" and "noinline" ("noinline" assumed)
// ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body
// 279 - controlling expression is constant
// ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case.
// 1684 - conversion from pointer to same-sized integral type (potential portability problem)
// 2259 - non-pointer conversion from "Eigen::Index={ptrdiff_t={long}}" to "int" may lose significant bits
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma warning push
#endif
#pragma warning disable 2196 279 1684 2259
#elif defined __clang__
// -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant
// this is really a stupid warning as it warns on compile-time expressions involving enums
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma clang diagnostic push
#endif
#pragma clang diagnostic ignored "-Wconstant-logical-operand"
#elif defined __GNUC__ && __GNUC__>=6
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
#if defined __NVCC__
// Disable the "statement is unreachable" message
#pragma diag_suppress code_is_unreachable
// Disable the "dynamic initialization in unreachable code" message
#pragma diag_suppress initialization_not_reachable
// Disable the "invalid error number" message that we get with older versions of nvcc
#pragma diag_suppress 1222
// Disable the "calling a __host__ function from a __host__ __device__ function is not allowed" messages (yes, there are many of them and they seem to change with every version of the compiler)
#pragma diag_suppress 2527
#pragma diag_suppress 2529
#pragma diag_suppress 2651
#pragma diag_suppress 2653
#pragma diag_suppress 2668
#pragma diag_suppress 2669
#pragma diag_suppress 2670
#pragma diag_suppress 2671
#pragma diag_suppress 2735
#pragma diag_suppress 2737
#endif
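// Note: defining EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS before including any
// Eigen header skips the warning-state push above, so these warnings remain
// disabled in the including translation unit instead of being restored by
// ReenableStupidWarnings.h:
//
//   #define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
//   #include <Eigen/Dense>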
#endif // not EIGEN_WARNINGS_DISABLED
| 3,564 | 45.907895 | 195 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/ForwardDeclarations.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_FORWARDDECLARATIONS_H
#define EIGEN_FORWARDDECLARATIONS_H
namespace Eigen {
namespace internal {
template<typename T> struct traits;
// here we say once and for all that traits<const T> == traits<T>
// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
// For example, traits<Map<const T> > != traits<Map<T> >, but
// traits<const Map<T> > == traits<Map<T> >
template<typename T> struct traits<const T> : traits<T> {};
template<typename Derived> struct has_direct_access
{
enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
};
template<typename Derived> struct accessors_level
{
enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
: (has_write_access ? WriteAccessors : ReadOnlyAccessors)
};
};
template<typename T> struct evaluator_traits;
template< typename T> struct evaluator;
} // end namespace internal
template<typename T> struct NumTraits;
template<typename Derived> struct EigenBase;
template<typename Derived> class DenseBase;
template<typename Derived> class PlainObjectBase;
template<typename Derived,
int Level = internal::accessors_level<Derived>::value >
class DenseCoeffsBase;
template<typename _Scalar, int _Rows, int _Cols,
int _Options = AutoAlign |
#if EIGEN_GNUC_AT(3,4)
// workaround a bug in at least gcc 3.4.6
// the innermost ?: ternary operator is misparsed. We write it slightly
// differently and this makes gcc 3.4.6 happy, but it's ugly.
// The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
// (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
: !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
: Eigen::ColMajor ),
#else
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
: (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
: EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
#endif
int _MaxRows = _Rows,
int _MaxCols = _Cols
> class Matrix;
template<typename Derived> class MatrixBase;
template<typename Derived> class ArrayBase;
template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
template<typename ExpressionType> class NestByValue;
template<typename ExpressionType> class ForceAlignedAccess;
template<typename ExpressionType> class SwapWrapper;
template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
template<typename MatrixType, int Size=Dynamic> class VectorBlock;
template<typename MatrixType> class Transpose;
template<typename MatrixType> class Conjugate;
template<typename NullaryOp, typename MatrixType> class CwiseNullaryOp;
template<typename UnaryOp, typename MatrixType> class CwiseUnaryOp;
template<typename ViewOp, typename MatrixType> class CwiseUnaryView;
template<typename BinaryOp, typename Lhs, typename Rhs> class CwiseBinaryOp;
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3> class CwiseTernaryOp;
template<typename Decomposition, typename Rhstype> class Solve;
template<typename XprType> class Inverse;
template<typename Lhs, typename Rhs, int Option = DefaultProduct> class Product;
template<typename Derived> class DiagonalBase;
template<typename _DiagonalVectorType> class DiagonalWrapper;
template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
template<typename MatrixType, int Index = 0> class Diagonal;
template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
template<typename Derived> class PermutationBase;
template<typename Derived> class TranspositionsBase;
template<typename _IndicesType> class PermutationWrapper;
template<typename _IndicesType> class TranspositionsWrapper;
template<typename Derived,
int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class MapBase;
template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
template<int Value = Dynamic> class InnerStride;
template<int Value = Dynamic> class OuterStride;
template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
template<typename Derived> class RefBase;
template<typename PlainObjectType, int Options = 0,
typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;
template<typename Derived> class TriangularBase;
template<typename MatrixType, unsigned int Mode> class TriangularView;
template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
template<typename MatrixType> class SparseView;
template<typename ExpressionType> class WithFormat;
template<typename MatrixType> struct CommaInitializer;
template<typename Derived> class ReturnByValue;
template<typename ExpressionType> class ArrayWrapper;
template<typename ExpressionType> class MatrixWrapper;
template<typename Derived> class SolverBase;
template<typename XprType> class InnerIterator;
namespace internal {
template<typename DecompositionType> struct kernel_retval_base;
template<typename DecompositionType> struct kernel_retval;
template<typename DecompositionType> struct image_retval_base;
template<typename DecompositionType> struct image_retval;
} // end namespace internal
namespace internal {
template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
}
namespace internal {
template<typename Lhs, typename Rhs> struct product_type;
template<bool> struct EnableIf;
/** \internal
* \class product_evaluator
* Products need their own evaluator with more template arguments allowing for
* easier partial template specializations.
*/
template< typename T,
int ProductTag = internal::product_type<typename T::Lhs,typename T::Rhs>::ret,
typename LhsShape = typename evaluator_traits<typename T::Lhs>::Shape,
typename RhsShape = typename evaluator_traits<typename T::Rhs>::Shape,
typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
typename RhsScalar = typename traits<typename T::Rhs>::Scalar
> struct product_evaluator;
}
template<typename Lhs, typename Rhs,
int ProductType = internal::product_type<Lhs,Rhs>::value>
struct ProductReturnType;
// this is a workaround for sun CC
template<typename Lhs, typename Rhs> struct LazyProductReturnType;
namespace internal {
// Provides scalar/packet-wise product and product with accumulation
// with optional conjugation of the arguments.
template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_sum_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_difference_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_conj_product_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_min_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_max_op;
template<typename Scalar> struct scalar_opposite_op;
template<typename Scalar> struct scalar_conjugate_op;
template<typename Scalar> struct scalar_real_op;
template<typename Scalar> struct scalar_imag_op;
template<typename Scalar> struct scalar_abs_op;
template<typename Scalar> struct scalar_abs2_op;
template<typename Scalar> struct scalar_sqrt_op;
template<typename Scalar> struct scalar_rsqrt_op;
template<typename Scalar> struct scalar_exp_op;
template<typename Scalar> struct scalar_log_op;
template<typename Scalar> struct scalar_cos_op;
template<typename Scalar> struct scalar_sin_op;
template<typename Scalar> struct scalar_acos_op;
template<typename Scalar> struct scalar_asin_op;
template<typename Scalar> struct scalar_tan_op;
template<typename Scalar> struct scalar_inverse_op;
template<typename Scalar> struct scalar_square_op;
template<typename Scalar> struct scalar_cube_op;
template<typename Scalar, typename NewType> struct scalar_cast_op;
template<typename Scalar> struct scalar_random_op;
template<typename Scalar> struct scalar_constant_op;
template<typename Scalar> struct scalar_identity_op;
template<typename Scalar,bool iscpx> struct scalar_sign_op;
template<typename Scalar,typename ScalarExponent> struct scalar_pow_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_hypot_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
// SpecialFunctions module
template<typename Scalar> struct scalar_lgamma_op;
template<typename Scalar> struct scalar_digamma_op;
template<typename Scalar> struct scalar_erf_op;
template<typename Scalar> struct scalar_erfc_op;
template<typename Scalar> struct scalar_igamma_op;
template<typename Scalar> struct scalar_igammac_op;
template<typename Scalar> struct scalar_zeta_op;
template<typename Scalar> struct scalar_betainc_op;
} // end namespace internal
struct IOFormat;
// Array module
template<typename _Scalar, int _Rows, int _Cols,
int _Options = AutoAlign |
#if EIGEN_GNUC_AT(3,4)
// workaround a bug in at least gcc 3.4.6
// the innermost ?: ternary operator is misparsed. We write it slightly
// differently and this makes gcc 3.4.6 happy, but it's ugly.
// The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
// (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
: !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
: Eigen::ColMajor ),
#else
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
: (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
: EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
#endif
int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
template<typename ExpressionType, int Direction> class VectorwiseOp;
template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
template<typename MatrixType, int Direction = BothDirections> class Reverse;
template<typename MatrixType> class FullPivLU;
template<typename MatrixType> class PartialPivLU;
namespace internal {
template<typename MatrixType> struct inverse_impl;
}
template<typename MatrixType> class HouseholderQR;
template<typename MatrixType> class ColPivHouseholderQR;
template<typename MatrixType> class FullPivHouseholderQR;
template<typename MatrixType> class CompleteOrthogonalDecomposition;
template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
template<typename MatrixType> class BDCSVD;
template<typename MatrixType, int UpLo = Lower> class LLT;
template<typename MatrixType, int UpLo = Lower> class LDLT;
template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
template<typename Scalar> class JacobiRotation;
// Geometry module:
template<typename Derived, int _Dim> class RotationBase;
template<typename Lhs, typename Rhs> class Cross;
template<typename Derived> class QuaternionBase;
template<typename Scalar> class Rotation2D;
template<typename Scalar> class AngleAxis;
template<typename Scalar,int Dim> class Translation;
template<typename Scalar,int Dim> class AlignedBox;
template<typename Scalar, int Options = AutoAlign> class Quaternion;
template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
template<typename Scalar> class UniformScaling;
template<typename MatrixType,int Direction> class Homogeneous;
// Sparse module:
template<typename Derived> class SparseMatrixBase;
// MatrixFunctions module
template<typename Derived> struct MatrixExponentialReturnValue;
template<typename Derived> class MatrixFunctionReturnValue;
template<typename Derived> class MatrixSquareRootReturnValue;
template<typename Derived> class MatrixLogarithmReturnValue;
template<typename Derived> class MatrixPowerReturnValue;
template<typename Derived> class MatrixComplexPowerReturnValue;
namespace internal {
template <typename Scalar>
struct stem_function
{
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef ComplexScalar type(ComplexScalar, int);
};
}
} // end namespace Eigen
#endif // EIGEN_FORWARDDECLARATIONS_H
| 14,150 | 45.70297 | 150 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/MKL_support.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to Intel(R) MKL
* Include file with common MKL declarations
********************************************************************************
*/
#ifndef EIGEN_MKL_SUPPORT_H
#define EIGEN_MKL_SUPPORT_H
#ifdef EIGEN_USE_MKL_ALL
#ifndef EIGEN_USE_BLAS
#define EIGEN_USE_BLAS
#endif
#ifndef EIGEN_USE_LAPACKE
#define EIGEN_USE_LAPACKE
#endif
#ifndef EIGEN_USE_MKL_VML
#define EIGEN_USE_MKL_VML
#endif
#endif
#ifdef EIGEN_USE_LAPACKE_STRICT
#define EIGEN_USE_LAPACKE
#endif
#if defined(EIGEN_USE_MKL_VML)
#define EIGEN_USE_MKL
#endif
#if defined EIGEN_USE_MKL
# include <mkl.h>
/* Check Intel MKL version for compatibility: < 10.3 is not usable with Eigen */
# ifndef INTEL_MKL_VERSION
# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
# undef EIGEN_USE_MKL
# endif
# ifndef EIGEN_USE_MKL
/*If the MKL version is too old, undef everything*/
# undef EIGEN_USE_MKL_ALL
# undef EIGEN_USE_LAPACKE
# undef EIGEN_USE_MKL_VML
# undef EIGEN_USE_LAPACKE_STRICT
# undef EIGEN_USE_LAPACKE
# endif
#endif
#if defined EIGEN_USE_MKL
#define EIGEN_MKL_VML_THRESHOLD 128
/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
/* MKL_BLAS, etc are not defined in 11.2 */
#ifdef MKL_DOMAIN_ALL
#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
#else
#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
#endif
#ifdef MKL_DOMAIN_BLAS
#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
#else
#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
#endif
#ifdef MKL_DOMAIN_FFT
#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
#else
#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
#endif
#ifdef MKL_DOMAIN_VML
#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
#else
#define EIGEN_MKL_DOMAIN_VML MKL_VML
#endif
#ifdef MKL_DOMAIN_PARDISO
#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
#else
#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
#endif
#endif
namespace Eigen {
typedef std::complex<double> dcomplex;
typedef std::complex<float> scomplex;
#if defined(EIGEN_USE_MKL)
typedef MKL_INT BlasIndex;
#else
typedef int BlasIndex;
#endif
} // end namespace Eigen
#if defined(EIGEN_USE_BLAS)
#include "../../misc/blas.h"
#endif
#endif // EIGEN_MKL_SUPPORT_H
| 3,970 | 29.782946 | 127 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/Macros.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MACROS_H
#define EIGEN_MACROS_H
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 3
#define EIGEN_MINOR_VERSION 4
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
EIGEN_MINOR_VERSION>=z))))
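// Typical usage (illustrative):
//
//   #if EIGEN_VERSION_AT_LEAST(3,3,0)
//   // code that relies on an Eigen 3.3 feature
//   #endif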
// Compiler identification, EIGEN_COMP_*
/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
#ifdef __GNUC__
#define EIGEN_COMP_GNUC 1
#else
#define EIGEN_COMP_GNUC 0
#endif
/// \internal EIGEN_COMP_CLANG set to major+minor version (e.g., 307 for clang 3.7) if the compiler is clang
#if defined(__clang__)
#define EIGEN_COMP_CLANG (__clang_major__*100+__clang_minor__)
#else
#define EIGEN_COMP_CLANG 0
#endif
/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
#if defined(__llvm__)
#define EIGEN_COMP_LLVM 1
#else
#define EIGEN_COMP_LLVM 0
#endif
/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise
#if defined(__INTEL_COMPILER)
#define EIGEN_COMP_ICC __INTEL_COMPILER
#else
#define EIGEN_COMP_ICC 0
#endif
/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw
#if defined(__MINGW32__)
#define EIGEN_COMP_MINGW 1
#else
#define EIGEN_COMP_MINGW 0
#endif
/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio
#if defined(__SUNPRO_CC)
#define EIGEN_COMP_SUNCC 1
#else
#define EIGEN_COMP_SUNCC 0
#endif
/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.
#if defined(_MSC_VER)
#define EIGEN_COMP_MSVC _MSC_VER
#else
#define EIGEN_COMP_MSVC 0
#endif
// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:
// name ver MSC_VER
// 2008 9 1500
// 2010 10 1600
// 2012 11 1700
// 2013 12 1800
// 2015 14 1900
// "15" 15 1900
/// \internal EIGEN_COMP_MSVC_STRICT set to _MSC_VER if the compiler is really Microsoft Visual C++ and not, e.g., ICC or clang-cl; 0 otherwise
#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG)
#define EIGEN_COMP_MSVC_STRICT _MSC_VER
#else
#define EIGEN_COMP_MSVC_STRICT 0
#endif
/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
#if defined(__IBMCPP__) || defined(__xlc__)
#define EIGEN_COMP_IBM 1
#else
#define EIGEN_COMP_IBM 0
#endif
/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
#if defined(__PGI)
#define EIGEN_COMP_PGI 1
#else
#define EIGEN_COMP_PGI 0
#endif
/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
#define EIGEN_COMP_ARM 1
#else
#define EIGEN_COMP_ARM 0
#endif
/// \internal EIGEN_COMP_EMSCRIPTEN set to 1 if compiling with Emscripten
#if defined(__EMSCRIPTEN__)
#define EIGEN_COMP_EMSCRIPTEN 1
#else
#define EIGEN_COMP_EMSCRIPTEN 0
#endif
/// \internal EIGEN_COMP_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN)
#define EIGEN_COMP_GNUC_STRICT 1
#else
#define EIGEN_COMP_GNUC_STRICT 0
#endif
#if EIGEN_COMP_GNUC
#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
#define EIGEN_GNUC_AT(x,y) ( __GNUC__==x && __GNUC_MINOR__==y )
#else
#define EIGEN_GNUC_AT_LEAST(x,y) 0
#define EIGEN_GNUC_AT_MOST(x,y) 0
#define EIGEN_GNUC_AT(x,y) 0
#endif
// FIXME: could probably be removed as we do not support gcc 3.x anymore
#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)
#define EIGEN_GCC3_OR_OLDER 1
#else
#define EIGEN_GCC3_OR_OLDER 0
#endif
// Architecture identification, EIGEN_ARCH_*
#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
#define EIGEN_ARCH_x86_64 1
#else
#define EIGEN_ARCH_x86_64 0
#endif
#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
#define EIGEN_ARCH_i386 1
#else
#define EIGEN_ARCH_i386 0
#endif
#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386
#define EIGEN_ARCH_i386_OR_x86_64 1
#else
#define EIGEN_ARCH_i386_OR_x86_64 0
#endif
/// \internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM
#if defined(__arm__)
#define EIGEN_ARCH_ARM 1
#else
#define EIGEN_ARCH_ARM 0
#endif
/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
#if defined(__aarch64__)
#define EIGEN_ARCH_ARM64 1
#else
#define EIGEN_ARCH_ARM64 0
#endif
#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
#define EIGEN_ARCH_ARM_OR_ARM64 1
#else
#define EIGEN_ARCH_ARM_OR_ARM64 0
#endif
/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
#if defined(__mips__) || defined(__mips)
#define EIGEN_ARCH_MIPS 1
#else
#define EIGEN_ARCH_MIPS 0
#endif
/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC
#if defined(__sparc__) || defined(__sparc)
#define EIGEN_ARCH_SPARC 1
#else
#define EIGEN_ARCH_SPARC 0
#endif
/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium
#if defined(__ia64__)
#define EIGEN_ARCH_IA64 1
#else
#define EIGEN_ARCH_IA64 0
#endif
/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC
#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
#define EIGEN_ARCH_PPC 1
#else
#define EIGEN_ARCH_PPC 0
#endif
// Operating system identification, EIGEN_OS_*
/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
#if defined(__unix__) || defined(__unix)
#define EIGEN_OS_UNIX 1
#else
#define EIGEN_OS_UNIX 0
#endif
/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel
#if defined(__linux__)
#define EIGEN_OS_LINUX 1
#else
#define EIGEN_OS_LINUX 0
#endif
/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android
// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.
#if defined(__ANDROID__) || defined(ANDROID)
#define EIGEN_OS_ANDROID 1
#else
#define EIGEN_OS_ANDROID 0
#endif
/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU/Linux and not another Linux-based OS (e.g., not Android)
#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)
#define EIGEN_OS_GNULINUX 1
#else
#define EIGEN_OS_GNULINUX 0
#endif
/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
#define EIGEN_OS_BSD 1
#else
#define EIGEN_OS_BSD 0
#endif
/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS
#if defined(__APPLE__)
#define EIGEN_OS_MAC 1
#else
#define EIGEN_OS_MAC 0
#endif
/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX
#if defined(__QNX__)
#define EIGEN_OS_QNX 1
#else
#define EIGEN_OS_QNX 0
#endif
/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based
#if defined(_WIN32)
#define EIGEN_OS_WIN 1
#else
#define EIGEN_OS_WIN 0
#endif
/// \internal EIGEN_OS_WIN64 set to 1 if the OS is Windows 64bits
#if defined(_WIN64)
#define EIGEN_OS_WIN64 1
#else
#define EIGEN_OS_WIN64 0
#endif
/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE
#if defined(_WIN32_WCE)
#define EIGEN_OS_WINCE 1
#else
#define EIGEN_OS_WINCE 0
#endif
/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin
#if defined(__CYGWIN__)
#define EIGEN_OS_CYGWIN 1
#else
#define EIGEN_OS_CYGWIN 0
#endif
/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variants
#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )
#define EIGEN_OS_WIN_STRICT 1
#else
#define EIGEN_OS_WIN_STRICT 0
#endif
/// \internal EIGEN_OS_SUN set to 1 if the OS is SUN
#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SUN 1
#else
#define EIGEN_OS_SUN 0
#endif
/// \internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris
#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SOLARIS 1
#else
#define EIGEN_OS_SOLARIS 0
#endif
#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
// see bug 89
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
#else
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
#endif
// This macro can be used to prevent macro expansion, e.g.:
// std::max EIGEN_NOT_A_MACRO(a,b)
#define EIGEN_NOT_A_MACRO
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
#else
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
#endif
#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
// Cross compiler wrapper around LLVM's __has_builtin
#ifdef __has_builtin
# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
#else
# define EIGEN_HAS_BUILTIN(x) 0
#endif
// A Clang feature extension to determine compiler features.
// We use it to determine 'cxx_rvalue_references'
#ifndef __has_feature
# define __has_feature(x) 0
#endif
// Upper bound on the C++ version to use.
// Expected values are 03, 11, 14, 17, etc.
// By default, let's use an arbitrarily large C++ version.
#ifndef EIGEN_MAX_CPP_VER
#define EIGEN_MAX_CPP_VER 99
#endif
#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)
#define EIGEN_HAS_CXX11 1
#else
#define EIGEN_HAS_CXX11 0
#endif
// Do we support r-value references?
#ifndef EIGEN_HAS_RVALUE_REFERENCES
#if EIGEN_MAX_CPP_VER>=11 && \
(__has_feature(cxx_rvalue_references) || \
(defined(__cplusplus) && __cplusplus >= 201103L) || \
(EIGEN_COMP_MSVC >= 1600))
#define EIGEN_HAS_RVALUE_REFERENCES 1
#else
#define EIGEN_HAS_RVALUE_REFERENCES 0
#endif
#endif
// Does the compiler support C99?
#ifndef EIGEN_HAS_C99_MATH
#if EIGEN_MAX_CPP_VER>=11 && \
((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \
|| (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \
|| (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)))
#define EIGEN_HAS_C99_MATH 1
#else
#define EIGEN_HAS_C99_MATH 0
#endif
#endif
// Does the compiler support result_of?
#ifndef EIGEN_HAS_STD_RESULT_OF
#if EIGEN_MAX_CPP_VER>=11 && ((__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L)))
#define EIGEN_HAS_STD_RESULT_OF 1
#else
#define EIGEN_HAS_STD_RESULT_OF 0
#endif
#endif
// Does the compiler support variadic templates?
#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \
&& ( !defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000) )
// ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
// this prevents nvcc from crashing when compiling Eigen on Tegra X1
#define EIGEN_HAS_VARIADIC_TEMPLATES 1
#else
#define EIGEN_HAS_VARIADIC_TEMPLATES 0
#endif
#endif
// Does the compiler fully support const expressions? (as in c++14)
#ifndef EIGEN_HAS_CONSTEXPR
#ifdef __CUDACC__
// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
#if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && defined(__CUDACC_VER__) && (EIGEN_COMP_CLANG || __CUDACC_VER__ >= 70500))
#define EIGEN_HAS_CONSTEXPR 1
#endif
#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
(EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)))
#define EIGEN_HAS_CONSTEXPR 1
#endif
#ifndef EIGEN_HAS_CONSTEXPR
#define EIGEN_HAS_CONSTEXPR 0
#endif
#endif
// Does the compiler support C++11 math?
// Let's be conservative and enable the default C++11 implementation only if we are sure it exists
#ifndef EIGEN_HAS_CXX11_MATH
#if EIGEN_MAX_CPP_VER>=11 && ((__cplusplus > 201103L) || (__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \
&& (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
#define EIGEN_HAS_CXX11_MATH 1
#else
#define EIGEN_HAS_CXX11_MATH 0
#endif
#endif
// Does the compiler support proper C++11 containers?
#ifndef EIGEN_HAS_CXX11_CONTAINERS
#if EIGEN_MAX_CPP_VER>=11 && \
((__cplusplus > 201103L) \
|| ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
|| EIGEN_COMP_MSVC >= 1900)
#define EIGEN_HAS_CXX11_CONTAINERS 1
#else
#define EIGEN_HAS_CXX11_CONTAINERS 0
#endif
#endif
// Does the compiler support C++11 noexcept?
#ifndef EIGEN_HAS_CXX11_NOEXCEPT
#if EIGEN_MAX_CPP_VER>=11 && \
(__has_feature(cxx_noexcept) \
|| (__cplusplus > 201103L) \
|| ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \
|| EIGEN_COMP_MSVC >= 1900)
#define EIGEN_HAS_CXX11_NOEXCEPT 1
#else
#define EIGEN_HAS_CXX11_NOEXCEPT 0
#endif
#endif
/** Allows disabling some optimizations which might affect the accuracy of the result.
  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
* - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.
*/
#ifndef EIGEN_FAST_MATH
#define EIGEN_FAST_MATH 1
#endif
#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
// concatenate two tokens
#define EIGEN_CAT2(a,b) a ## b
#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
#define EIGEN_COMMA ,
// convert a token to a string
#define EIGEN_MAKESTRING2(a) #a
#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
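/* Illustrative expansions (a sketch, not part of the library):
 *   #define FOO bar
 *   EIGEN_CAT(my_,FOO)    // -> my_bar : the two-level indirection expands FOO first
 *   EIGEN_MAKESTRING(FOO) // -> "bar"  : likewise, FOO is expanded before stringification
 */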
// EIGEN_STRONG_INLINE is a stronger version of the inline keyword, using __forceinline on MSVC,
// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
// but GCC is still doing fine with just inline.
#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC
#define EIGEN_STRONG_INLINE __forceinline
#else
#define EIGEN_STRONG_INLINE inline
#endif
// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
// attribute to maximize inlining. This should only be used when really necessary: in particular,
// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
// FIXME with the always_inline attribute,
// gcc 3.4.x and 4.1 report the following compilation error:
// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
// : function body not available
// See also bug 1367
#if EIGEN_GNUC_AT_LEAST(4,2)
#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
#else
#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
#endif
#if EIGEN_COMP_GNUC
#define EIGEN_DONT_INLINE __attribute__((noinline))
#elif EIGEN_COMP_MSVC
#define EIGEN_DONT_INLINE __declspec(noinline)
#else
#define EIGEN_DONT_INLINE
#endif
#if EIGEN_COMP_GNUC
#define EIGEN_PERMISSIVE_EXPR __extension__
#else
#define EIGEN_PERMISSIVE_EXPR
#endif
// these macros allow getting rid of linking errors about multiply defined functions.
//  - static is not very good because it prevents definitions from different object files from being merged.
//    So static causes the resulting linked executable to be bloated with multiple copies of the same function.
//  - inline is not perfect either, as it unwantedly hints the compiler toward inlining the function.
#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
#ifdef NDEBUG
# ifndef EIGEN_NO_DEBUG
# define EIGEN_NO_DEBUG
# endif
#endif
// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
#ifdef EIGEN_NO_DEBUG
#define eigen_plain_assert(x)
#else
#if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
namespace Eigen {
namespace internal {
inline bool copy_bool(bool b) { return b; }
}
}
#define eigen_plain_assert(x) assert(x)
#else
// work around bug 89
#include <cstdlib> // for abort
#include <iostream> // for std::cerr
namespace Eigen {
namespace internal {
// trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
// see bug 89.
namespace {
EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
}
inline void assert_fail(const char *condition, const char *function, const char *file, int line)
{
std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
abort();
}
}
}
#define eigen_plain_assert(x) \
do { \
if(!Eigen::internal::copy_bool(x)) \
Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
} while(false)
#endif
#endif
// eigen_assert can be overridden
#ifndef eigen_assert
#define eigen_assert(x) eigen_plain_assert(x)
#endif
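/* For instance, a client can route failed assertions to exceptions by defining
 * eigen_assert before including any Eigen header (an illustrative sketch; the
 * exception type chosen here is arbitrary):
 * \code
 * #include <stdexcept>
 * #define eigen_assert(x) if(!(x)) { throw std::runtime_error(#x); }
 * #include <Eigen/Dense>
 * \endcode
 */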
#ifdef EIGEN_INTERNAL_DEBUGGING
#define eigen_internal_assert(x) eigen_assert(x)
#else
#define eigen_internal_assert(x)
#endif
#ifdef EIGEN_NO_DEBUG
#define EIGEN_ONLY_USED_FOR_DEBUG(x) EIGEN_UNUSED_VARIABLE(x)
#else
#define EIGEN_ONLY_USED_FOR_DEBUG(x)
#endif
#ifndef EIGEN_NO_DEPRECATED_WARNING
#if EIGEN_COMP_GNUC
#define EIGEN_DEPRECATED __attribute__((deprecated))
#elif EIGEN_COMP_MSVC
#define EIGEN_DEPRECATED __declspec(deprecated)
#else
#define EIGEN_DEPRECATED
#endif
#else
#define EIGEN_DEPRECATED
#endif
#if EIGEN_COMP_GNUC
#define EIGEN_UNUSED __attribute__((unused))
#else
#define EIGEN_UNUSED
#endif
// Suppresses 'unused variable' warnings.
namespace Eigen {
namespace internal {
template<typename T> EIGEN_DEVICE_FUNC void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
#if !defined(EIGEN_ASM_COMMENT)
#if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64)
#define EIGEN_ASM_COMMENT(X) __asm__("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
#endif
//------------------------------------------------------------------------------------------
// Static and dynamic alignment control
//
// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
// as the maximal boundaries in bytes on which dynamically and statically allocated data may be aligned, respectively.
// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
// a default value is automatically computed based on architecture, compiler, and OS.
//
// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
// to be used to declare statically aligned buffers.
//------------------------------------------------------------------------------------------
/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
* However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
* so that vectorization doesn't affect binary compatibility.
*
* If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
* vectorized and non-vectorized code.
*/
#if (defined __CUDACC__)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#elif EIGEN_COMP_MSVC
#define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
#elif EIGEN_COMP_SUNCC
// FIXME not sure about this one:
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#else
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif
// If the user explicitly disables vectorization, then we also disable alignment
#if defined(EIGEN_DONT_VECTORIZE)
#define EIGEN_IDEAL_MAX_ALIGN_BYTES 0
#elif defined(EIGEN_VECTORIZE_AVX512)
// 64-byte static alignment is preferred only if really required
#define EIGEN_IDEAL_MAX_ALIGN_BYTES 64
#elif defined(__AVX__)
// 32-byte static alignment is preferred only if really required
#define EIGEN_IDEAL_MAX_ALIGN_BYTES 32
#else
#define EIGEN_IDEAL_MAX_ALIGN_BYTES 16
#endif
// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense
#define EIGEN_MIN_ALIGN_BYTES 16
// Defines the boundary (in bytes) on which the data needs to be aligned. Note
// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
// aligned at all regardless of the value of this #define.
#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0
#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
#endif
// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated
// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
#ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
#undef EIGEN_MAX_STATIC_ALIGN_BYTES
#endif
#define EIGEN_MAX_STATIC_ALIGN_BYTES 0
#endif
#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
// Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
  // certain common platforms (compiler+architecture combinations) to avoid these problems.
  // Only static alignment is really problematic (it relies on nonstandard compiler extensions),
  // so we try to keep heap alignment even when we have to disable static alignment.
#if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64)
#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
#elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
// Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
// Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
// 4.8 and newer seem definitely unaffected.
#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
#else
#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
#endif
// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
&& !EIGEN_GCC3_OR_OLDER \
&& !EIGEN_COMP_SUNCC \
&& !EIGEN_OS_QNX
#define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
#else
#define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
#endif
#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
#else
#define EIGEN_MAX_STATIC_ALIGN_BYTES 0
#endif
#endif
// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_STATIC_ALIGN_BYTES
#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES
#undef EIGEN_MAX_STATIC_ALIGN_BYTES
#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
#endif
#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
#endif
// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test of whether we want to align arrays on the stack or not.
// It takes into account both the user's choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY
#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)
#else
#define EIGEN_ALIGN_MAX
#endif
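/* Illustrative use of these shortcuts (a sketch):
 * \code
 * EIGEN_ALIGN16 float packet[4];    // a 16-byte-aligned static buffer
 * EIGEN_ALIGN_MAX double buffer[8]; // aligned to EIGEN_MAX_STATIC_ALIGN_BYTES, if non-zero
 * \endcode
 */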
// Dynamic alignment control
#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0
#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.
#endif
#ifdef EIGEN_DONT_ALIGN
#ifdef EIGEN_MAX_ALIGN_BYTES
#undef EIGEN_MAX_ALIGN_BYTES
#endif
#define EIGEN_MAX_ALIGN_BYTES 0
#elif !defined(EIGEN_MAX_ALIGN_BYTES)
#define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
#endif
#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES
#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
#else
#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES
#endif
#ifndef EIGEN_UNALIGNED_VECTORIZE
#define EIGEN_UNALIGNED_VECTORIZE 1
#endif
//----------------------------------------------------------------------
#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
#define EIGEN_RESTRICT
#endif
#ifndef EIGEN_RESTRICT
#define EIGEN_RESTRICT __restrict
#endif
#ifndef EIGEN_STACK_ALLOCATION_LIMIT
// 131072 == 128 KB
#define EIGEN_STACK_ALLOCATION_LIMIT 131072
#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT
#ifdef EIGEN_MAKING_DOCS
// format used in Eigen's documentation
// we need to define it here, as escaping characters in CMake add_definitions' argument seems very problematic.
#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
#else
#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
#endif
#endif
// just an empty macro!
#define EIGEN_EMPTY
#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || defined(__CUDACC_VER__)) // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
template <typename OtherDerived> \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
#else
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
{ \
Base::operator=(other); \
return *this; \
}
#endif
/** \internal
* \brief Macro to manually inherit assignment operators.
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
*/
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
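/* Typical shape of a class using this macro (a hypothetical sketch; Eigen's own
 * dense classes such as Map use it this way, with Base a typedef to the base
 * class that defines the templated operator=):
 * \code
 * class MyXpr : public MyBase {
 *   public:
 *     typedef MyBase Base;
 *     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MyXpr)
 * };
 * \endcode
 */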
/**
 * Just a side note: within #defines, Doxygen comments work only when placed
 * after the documented object (via '!<'). Comments cannot span multiple lines,
 * hence these extra long lines. What confuses Doxygen here is that we use '\'
 * line continuations and basically have a bunch of typedefs with their
 * documentation in a single line.
 **/
#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T corresponds to RealScalar. */ \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::ref_selector<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::StorageIndex StorageIndex; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
using Base::derived; \
using Base::const_cast_derived;
// FIXME Maybe the EIGEN_DENSE_PUBLIC_INTERFACE could be removed as importing PacketScalar is rarely needed
#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
typedef typename Base::PacketScalar PacketScalar;
#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
: ((int)a == 1 || (int)b == 1) ? 1 \
: ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a <= (int)b) ? (int)a : (int)b)
// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
// (between 0 and 3), it is not more than 3.
#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
: ((int)a == 1 || (int)b == 1) ? 1 \
: ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
: ((int)a == Dynamic) ? (int)b \
: ((int)b == Dynamic) ? (int)a \
: ((int)a <= (int)b) ? (int)a : (int)b)
// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a >= (int)b) ? (int)a : (int)b)
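/* Illustrative evaluations (a sketch):
 * \code
 * EIGEN_SIZE_MIN_PREFER_DYNAMIC(3,Dynamic) // == Dynamic: the runtime size may be anything in [0,3]
 * EIGEN_SIZE_MIN_PREFER_FIXED(3,Dynamic)   // == 3: whatever the runtime size, 3 is a valid upper bound
 * EIGEN_SIZE_MAX(3,Dynamic)                // == Dynamic
 * \endcode
 */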
#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
#define EIGEN_IMPLIES(a,b) (!(a) || (b))
// the expression type of a standard coefficient wise binary operation
#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \
CwiseBinaryOp< \
EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \
typename internal::traits<LHS>::Scalar, \
typename internal::traits<RHS>::Scalar \
>, \
const LHS, \
const RHS \
>
#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,OPNAME) \
template<typename OtherDerived> \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME) \
(METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME)(derived(), other.derived()); \
}
#define EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,TYPEA,TYPEB) \
(Eigen::internal::has_ReturnType<Eigen::ScalarBinaryOpTraits<TYPEA,TYPEB,EIGEN_CAT(EIGEN_CAT(Eigen::internal::scalar_,OPNAME),_op)<TYPEA,TYPEB> > >::value)
#define EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(EXPR,SCALAR,OPNAME) \
CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<typename internal::traits<EXPR>::Scalar,SCALAR>, const EXPR, \
const typename internal::plain_constant_type<EXPR,SCALAR>::type>
#define EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(SCALAR,EXPR,OPNAME) \
CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<SCALAR,typename internal::traits<EXPR>::Scalar>, \
const typename internal::plain_constant_type<EXPR,SCALAR>::type, const EXPR>
// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010")
#if EIGEN_COMP_MSVC_STRICT<=1600
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if<true,X>::type
#else
#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X
#endif
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \
template <typename T> EIGEN_DEVICE_FUNC inline \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\
(METHOD)(const T& scalar) const { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \
return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \
typename internal::plain_constant_type<Derived,PromotedT>::type(derived().rows(), derived().cols(), internal::scalar_constant_op<PromotedT>(scalar))); \
}
#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
template <typename T> EIGEN_DEVICE_FUNC inline friend \
EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \
(METHOD)(const T& scalar, const StorageBaseType& matrix) { \
typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \
return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \
typename internal::plain_constant_type<Derived,PromotedT>::type(matrix.derived().rows(), matrix.derived().cols(), internal::scalar_constant_op<PromotedT>(scalar)), matrix.derived()); \
}
#define EIGEN_MAKE_SCALAR_BINARY_OP(METHOD,OPNAME) \
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME)
#ifdef EIGEN_EXCEPTIONS
# define EIGEN_THROW_X(X) throw X
# define EIGEN_THROW throw
# define EIGEN_TRY try
# define EIGEN_CATCH(X) catch (X)
#else
# ifdef __CUDA_ARCH__
# define EIGEN_THROW_X(X) asm("trap;")
# define EIGEN_THROW asm("trap;")
# else
# define EIGEN_THROW_X(X) std::abort()
# define EIGEN_THROW std::abort()
# endif
# define EIGEN_TRY if (true)
# define EIGEN_CATCH(X) else
#endif
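/* These wrappers keep code exception-neutral whether or not EIGEN_EXCEPTIONS is
 * defined; construct_elements_of_array() in Memory.h is a typical user. The
 * usual pattern looks as follows (risky() and cleanup() are placeholder names):
 * \code
 * EIGEN_TRY { risky(); }
 * EIGEN_CATCH(...) { cleanup(); EIGEN_THROW; }
 * \endcode
 */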
#if EIGEN_HAS_CXX11_NOEXCEPT
# define EIGEN_INCLUDE_TYPE_TRAITS
# define EIGEN_NOEXCEPT noexcept
# define EIGEN_NOEXCEPT_IF(x) noexcept(x)
# define EIGEN_NO_THROW noexcept(true)
# define EIGEN_EXCEPTION_SPEC(X) noexcept(false)
#else
# define EIGEN_NOEXCEPT
# define EIGEN_NOEXCEPT_IF(x)
# define EIGEN_NO_THROW throw()
# define EIGEN_EXCEPTION_SPEC(X) throw(X)
#endif
#endif // EIGEN_MACROS_H
abess | abess-master/python/include/Eigen/src/Core/util/Memory.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2008-2009 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Kenneth Riddile <[email protected]>
// Copyright (C) 2010 Hauke Heibel <[email protected]>
// Copyright (C) 2010 Thomas Capricelli <[email protected]>
// Copyright (C) 2013 Pavel Holoborodko <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*****************************************************************************
*** Platform checks for aligned malloc functions ***
*****************************************************************************/
#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H
#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
// Try to determine automatically if malloc is already aligned.
// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
// This is true at least since glibc 2.8.
// This leaves the question how to detect 64-bit. According to this document,
// http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
&& defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
#endif
// FreeBSD 6 seems to have 16-byte aligned malloc
// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
#endif
#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \
|| (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \
|| EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
|| EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
#define EIGEN_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif
#endif
namespace Eigen {
namespace internal {
EIGEN_DEVICE_FUNC
inline void throw_std_bad_alloc()
{
#ifdef EIGEN_EXCEPTIONS
throw std::bad_alloc();
#else
std::size_t huge = static_cast<std::size_t>(-1);
new int[huge];
#endif
}
/*****************************************************************************
*** Implementation of handmade aligned functions ***
*****************************************************************************/
/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
/** \internal Like malloc, but the returned pointer is guaranteed to be EIGEN_DEFAULT_ALIGN_BYTES-byte aligned.
  * Fast, but wastes EIGEN_DEFAULT_ALIGN_BYTES additional bytes of memory. Does not throw any exception.
*/
inline void* handmade_aligned_malloc(std::size_t size)
{
void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
if (original == 0) return 0;
void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
/** \internal Frees memory allocated with handmade_aligned_malloc */
inline void handmade_aligned_free(void *ptr)
{
if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}
/** \internal
* \brief Reallocates aligned memory.
* Since we know that our handmade version is based on std::malloc
* we can use std::realloc to implement efficient reallocation.
*/
inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
{
if (ptr == 0) return handmade_aligned_malloc(size);
void *original = *(reinterpret_cast<void**>(ptr) - 1);
std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);
if (original == 0) return 0;
void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
void *previous_aligned = static_cast<char *>(original)+previous_offset;
if(aligned!=previous_aligned)
std::memmove(aligned, previous_aligned, size);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
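/* Illustrative round trip (a sketch; most code should go through the portable
 * aligned_malloc/aligned_realloc/aligned_free wrappers defined below):
 * \code
 * void* p = handmade_aligned_malloc(1000);     // EIGEN_DEFAULT_ALIGN_BYTES-aligned
 * p = handmade_aligned_realloc(p, 2000, 1000); // preserves alignment and contents
 * handmade_aligned_free(p);
 * \endcode
 */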
/*****************************************************************************
*** Implementation of portable aligned versions of malloc/free/realloc ***
*****************************************************************************/
#ifdef EIGEN_NO_MALLOC
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{
eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
}
#elif defined EIGEN_RUNTIME_NO_MALLOC
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
{
static bool value = true;
  if (update)
value = new_value;
return value;
}
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{
eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
}
#else
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{}
#endif
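/* With EIGEN_RUNTIME_NO_MALLOC defined, a test can fence a code section against
 * hidden heap allocations (an illustrative sketch):
 * \code
 * Eigen::internal::set_is_malloc_allowed(false);
 * // ... run code that must not allocate; any Eigen heap allocation now asserts ...
 * Eigen::internal::set_is_malloc_allowed(true);
 * \endcode
 */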
/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements.
  * On allocation error, a std::bad_alloc exception is thrown.
*/
EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
{
check_that_malloc_is_allowed();
void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
result = std::malloc(size);
#if EIGEN_DEFAULT_ALIGN_BYTES==16
  eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fall back to the handmade aligned memory allocator.");
#endif
#else
result = handmade_aligned_malloc(size);
#endif
if(!result && size)
throw_std_bad_alloc();
return result;
}
/** \internal Frees memory allocated with aligned_malloc. */
EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
{
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
std::free(ptr);
#else
handmade_aligned_free(ptr);
#endif
}
/**
* \internal
* \brief Reallocates an aligned block of memory.
* \throws std::bad_alloc on allocation failure
*/
inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)
{
EIGEN_UNUSED_VARIABLE(old_size);
void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
result = std::realloc(ptr,new_size);
#else
result = handmade_aligned_realloc(ptr,new_size,old_size);
#endif
if (!result && new_size)
throw_std_bad_alloc();
return result;
}
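/* Typical usage of the portable trio (an illustrative sketch):
 * \code
 * float* data = static_cast<float*>(aligned_malloc(64*sizeof(float)));
 * data = static_cast<float*>(aligned_realloc(data, 128*sizeof(float), 64*sizeof(float)));
 * aligned_free(data);
 * \endcode
 */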
/*****************************************************************************
*** Implementation of conditionally aligned functions ***
*****************************************************************************/
/** \internal Allocates \a size bytes. If Align is true, then the returned pointer is guaranteed to be aligned as by aligned_malloc.
  * On allocation error, a std::bad_alloc exception is thrown.
*/
template<bool Align> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size)
{
return aligned_malloc(size);
}
template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std::size_t size)
{
check_that_malloc_is_allowed();
void *result = std::malloc(size);
if(!result && size)
throw_std_bad_alloc();
return result;
}
/** \internal Frees memory allocated with conditional_aligned_malloc */
template<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr)
{
aligned_free(ptr);
}
template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)
{
std::free(ptr);
}
template<bool Align> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size)
{
return aligned_realloc(ptr, new_size, old_size);
}
template<> inline void* conditional_aligned_realloc<false>(void* ptr, std::size_t new_size, std::size_t)
{
return std::realloc(ptr, new_size);
}
/*****************************************************************************
*** Construction/destruction of array elements ***
*****************************************************************************/
/** \internal Destructs the elements of an array.
  * The \a size parameter tells how many objects to call the destructor of T on.
*/
template<typename T> EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, std::size_t size)
{
// always destruct an array starting from the end.
if(ptr)
while(size) ptr[--size].~T();
}
/** \internal Constructs the elements of an array.
  * The \a size parameter tells how many objects to call the constructor of T on.
*/
template<typename T> EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, std::size_t size)
{
std::size_t i;
EIGEN_TRY
{
for (i = 0; i < size; ++i) ::new (ptr + i) T;
return ptr;
}
EIGEN_CATCH(...)
{
destruct_elements_of_array(ptr, i);
EIGEN_THROW;
}
return NULL;
}
/*****************************************************************************
*** Implementation of aligned new/delete-like functions ***
*****************************************************************************/
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size)
{
if(size > std::size_t(-1) / sizeof(T))
throw_std_bad_alloc();
}
/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to be aligned as by aligned_malloc.
  * On allocation error, a std::bad_alloc exception is thrown.
* The default constructor of T is called.
*/
template<typename T> EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size)
{
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
EIGEN_TRY
{
return construct_elements_of_array(result, size);
}
EIGEN_CATCH(...)
{
aligned_free(result);
EIGEN_THROW;
}
return result;
}
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size)
{
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
EIGEN_TRY
{
return construct_elements_of_array(result, size);
}
EIGEN_CATCH(...)
{
conditional_aligned_free<Align>(result);
EIGEN_THROW;
}
return result;
}
/** \internal Deletes objects constructed with aligned_new
  * The \a size parameter tells how many objects to call the destructor of T on.
*/
template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)
{
destruct_elements_of_array<T>(ptr, size);
aligned_free(ptr);
}
/** \internal Deletes objects constructed with conditional_aligned_new
  * The \a size parameter tells how many objects to call the destructor of T on.
*/
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size)
{
destruct_elements_of_array<T>(ptr, size);
conditional_aligned_free<Align>(ptr);
}
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size)
{
check_size_for_overflow<T>(new_size);
check_size_for_overflow<T>(old_size);
if(new_size < old_size)
destruct_elements_of_array(pts+new_size, old_size-new_size);
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
if(new_size > old_size)
{
EIGEN_TRY
{
construct_elements_of_array(result+old_size, new_size-old_size);
}
EIGEN_CATCH(...)
{
conditional_aligned_free<Align>(result);
EIGEN_THROW;
}
}
return result;
}
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size)
{
if(size==0)
return 0; // short-cut. Also fixes Bug 884
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
if(NumTraits<T>::RequireInitialization)
{
EIGEN_TRY
{
construct_elements_of_array(result, size);
}
EIGEN_CATCH(...)
{
conditional_aligned_free<Align>(result);
EIGEN_THROW;
}
}
return result;
}
template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size)
{
check_size_for_overflow<T>(new_size);
check_size_for_overflow<T>(old_size);
if(NumTraits<T>::RequireInitialization && (new_size < old_size))
destruct_elements_of_array(pts+new_size, old_size-new_size);
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
if(NumTraits<T>::RequireInitialization && (new_size > old_size))
{
EIGEN_TRY
{
construct_elements_of_array(result+old_size, new_size-old_size);
}
EIGEN_CATCH(...)
{
conditional_aligned_free<Align>(result);
EIGEN_THROW;
}
}
return result;
}
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size)
{
if(NumTraits<T>::RequireInitialization)
destruct_elements_of_array<T>(ptr, size);
conditional_aligned_free<Align>(ptr);
}
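/* The *_auto variants skip construction/destruction when the scalar type is
 * trivial, i.e. when NumTraits<T>::RequireInitialization is false. Illustrative
 * pairing (a sketch):
 * \code
 * double* buf = conditional_aligned_new_auto<double,true>(100); // no per-element ctor for double
 * conditional_aligned_delete_auto<double,true>(buf, 100);       // and no per-element dtor
 * \endcode
 */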
/****************************************************************************/
/** \internal Returns the index of the first element of the array that is well aligned with respect to the requested \a Alignment.
*
* \tparam Alignment requested alignment in Bytes.
* \param array the address of the start of the array
* \param size the size of the array
*
  * \note If no element of the array is well aligned or the requested alignment is not a multiple of a scalar,
  * the size of the array is returned. For example with SSE, the requested alignment is typically 16 bytes. If
  * the packet size for the given scalar type is 1, then everything is considered well-aligned.
  *
  * \note Otherwise, if the Alignment is larger than the scalar size, we rely on the assumption that sizeof(Scalar) is a
  * power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
  * example with Scalar=double on certain 32-bit platforms, see bug #79.
*
* There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
* \sa first_default_aligned()
*/
template<int Alignment, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
{
const Index ScalarSize = sizeof(Scalar);
const Index AlignmentSize = Alignment / ScalarSize;
const Index AlignmentMask = AlignmentSize-1;
if(AlignmentSize<=1)
{
    // Either the requested alignment is smaller than a scalar, or it exactly matches one scalar,
    // so that all elements of the array have the same alignment.
return 0;
}
else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
{
// The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
// Consequently, no element of the array is well aligned.
return size;
}
else
{
Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
return (first < size) ? first : size;
}
}
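/* Illustrative call (a sketch): peeling the unaligned head of a float array
 * before a 16-byte vectorized loop:
 * \code
 * Index skip = first_aligned<16>(array, size);
 * // process array[0..skip) with scalar code, array[skip..size) with aligned packets
 * \endcode
 */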
/** \internal Returns the index of the first element of the array that is well aligned with respect the largest packet requirement.
* \sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase<Derived>) */
template<typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size)
{
typedef typename packet_traits<Scalar>::type DefaultPacketType;
return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
}
/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size
*/
template<typename Index>
inline Index first_multiple(Index size, Index base)
{
return ((size+base-1)/base)*base;
}
// std::copy is much slower than memcpy, so let's introduce a smart_copy which
// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
template<typename T, bool UseMemcpy> struct smart_copy_helper;
template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)
{
smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}
template<typename T> struct smart_copy_helper<T,true> {
EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
{
IntPtr size = IntPtr(end)-IntPtr(start);
if(size==0) return;
eigen_internal_assert(start!=0 && end!=0 && target!=0);
memcpy(target, start, size);
}
};
template<typename T> struct smart_copy_helper<T,false> {
EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
{ std::copy(start, end, target); }
};
// intelligent memmove. falls back to std::memmove for POD types, uses std::copy otherwise.
template<typename T, bool UseMemmove> struct smart_memmove_helper;
template<typename T> void smart_memmove(const T* start, const T* end, T* target)
{
smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}
template<typename T> struct smart_memmove_helper<T,true> {
static inline void run(const T* start, const T* end, T* target)
{
IntPtr size = IntPtr(end)-IntPtr(start);
if(size==0) return;
eigen_internal_assert(start!=0 && end!=0 && target!=0);
std::memmove(target, start, size);
}
};
template<typename T> struct smart_memmove_helper<T,false> {
static inline void run(const T* start, const T* end, T* target)
{
if (UIntPtr(target) < UIntPtr(start))
{
std::copy(start, end, target);
}
else
{
std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T);
std::copy_backward(start, end, target + count);
}
}
};
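/* Illustrative calls (a sketch): both helpers dispatch to raw memcpy/memmove
 * for trivial types and to std::copy/std::copy_backward otherwise:
 * \code
 * smart_copy(src, src+n, dst);    // source and target must not overlap
 * smart_memmove(src, src+n, dst); // overlapping source and target are fine
 * \endcode
 */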
/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc) ***
*****************************************************************************/
// you can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
#if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif EIGEN_COMP_MSVC
#define EIGEN_ALLOCA _alloca
#endif
#endif
// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
template<typename T> class aligned_stack_memory_handler : noncopyable
{
public:
/* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
* Note that \a ptr can be 0 regardless of the other parameters.
* This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
  * In this case, the buffer elements will also be destructed when this handler is destructed.
* Finally, if \a dealloc is true, then the pointer \a ptr is freed.
**/
aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc)
: m_ptr(ptr), m_size(size), m_deallocate(dealloc)
{
if(NumTraits<T>::RequireInitialization && m_ptr)
Eigen::internal::construct_elements_of_array(m_ptr, size);
}
~aligned_stack_memory_handler()
{
if(NumTraits<T>::RequireInitialization && m_ptr)
Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
if(m_deallocate)
Eigen::internal::aligned_free(m_ptr);
}
protected:
T* m_ptr;
std::size_t m_size;
bool m_deallocate;
};
template<typename T> class scoped_array : noncopyable
{
T* m_ptr;
public:
explicit scoped_array(std::ptrdiff_t size)
{
m_ptr = new T[size];
}
~scoped_array()
{
delete[] m_ptr;
}
T& operator[](std::ptrdiff_t i) { return m_ptr[i]; }
const T& operator[](std::ptrdiff_t i) const { return m_ptr[i]; }
T* &ptr() { return m_ptr; }
const T* ptr() const { return m_ptr; }
operator const T*() const { return m_ptr; }
};
template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
{
std::swap(a.ptr(),b.ptr());
}
} // end namespace internal
/** \internal
  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
* if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
* (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
* The allocated buffer is automatically deleted when exiting the scope of this declaration.
  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
* Here is an example:
* \code
* {
* ei_declare_aligned_stack_constructed_variable(float,data,size,0);
* // use data[0] to data[size-1]
* }
* \endcode
  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
*/
#ifdef EIGEN_ALLOCA
#if EIGEN_DEFAULT_ALIGN_BYTES>0
// We always manually re-align the result of EIGEN_ALLOCA.
// If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment.
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))
#else
#define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)
#endif
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
: reinterpret_cast<TYPE*>( \
(sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
: Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
#else
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
#endif
/*****************************************************************************
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] ***
*****************************************************************************/
#if EIGEN_MAX_ALIGN_BYTES!=0
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \
EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
EIGEN_CATCH (...) { return 0; } \
}
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
void *operator new(std::size_t size) { \
return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
} \
void *operator new[](std::size_t size) { \
return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
} \
void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
/* in-place new and delete. since (at least afaik) there is no actual */ \
/* memory allocated we can safely let the default implementation handle */ \
/* this particular case. */ \
static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \
static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \
void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \
void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \
/* nothrow-new (returns zero instead of std::bad_alloc) */ \
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \
Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
} \
typedef void eigen_aligned_operator_new_marker_type;
#else
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))
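// Classical client-side usage for a struct holding a fixed-size vectorizable
// Eigen object (an illustrative sketch):
// \code
// struct Foo {
//   Eigen::Matrix4f transform; // fixed-size type with alignment requirements
//   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// };
// Foo *foo = new Foo; // guaranteed to be suitably aligned
// \endcode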
/****************************************************************************/
/** \class aligned_allocator
* \ingroup Core_Module
*
  * \brief STL-compatible allocator to use with 16-byte-aligned types
*
* Example:
* \code
  * // Matrix4f requires 16-byte alignment:
* std::map< int, Matrix4f, std::less<int>,
* aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
  * // Vector3f does not require 16-byte alignment, no need to use Eigen's allocator:
* std::map< int, Vector3f > my_map_vec3;
* \endcode
*
* \sa \blank \ref TopicStlContainers.
*/
template<class T>
class aligned_allocator : public std::allocator<T>
{
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class U>
struct rebind
{
typedef aligned_allocator<U> other;
};
aligned_allocator() : std::allocator<T>() {}
aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}
template<class U>
aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}
~aligned_allocator() {}
pointer allocate(size_type num, const void* /*hint*/ = 0)
{
internal::check_size_for_overflow<T>(num);
return static_cast<pointer>( internal::aligned_malloc(num * sizeof(T)) );
}
void deallocate(pointer p, size_type /*num*/)
{
internal::aligned_free(p);
}
};
//---------- Cache sizes ----------
#if !defined(EIGEN_NO_CPUID)
# if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64
# if defined(__PIC__) && EIGEN_ARCH_i386
// Case for x86 with PIC
# define EIGEN_CPUID(abcd,func,id) \
__asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
# elif defined(__PIC__) && EIGEN_ARCH_x86_64
// Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
// However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
# define EIGEN_CPUID(abcd,func,id) \
__asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
# else
// Case for x86_64 or x86 w/o PIC
# define EIGEN_CPUID(abcd,func,id) \
__asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
# endif
# elif EIGEN_COMP_MSVC
# if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
# endif
# endif
#endif
namespace internal {
#ifdef EIGEN_CPUID
inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}
inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
{
int abcd[4];
l1 = l2 = l3 = 0;
int cache_id = 0;
int cache_type = 0;
do {
abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
EIGEN_CPUID(abcd,0x4,cache_id);
cache_type = (abcd[0] & 0x0F) >> 0;
if(cache_type==1||cache_type==3) // data or unified cache
{
int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5]
int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0]
int sets = (abcd[2]); // C[31:0]
int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
switch(cache_level)
{
case 1: l1 = cache_size; break;
case 2: l2 = cache_size; break;
case 3: l3 = cache_size; break;
default: break;
}
}
cache_id++;
} while(cache_type>0 && cache_id<16);
}
inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
{
int abcd[4];
abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
l1 = l2 = l3 = 0;
EIGEN_CPUID(abcd,0x00000002,0);
unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
bool check_for_p2_core2 = false;
for(int i=0; i<14; ++i)
{
switch(bytes[i])
{
case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines
case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines
case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines
case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines
case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines
case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
case 0x3A: l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core)
case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
default: break;
}
}
if(check_for_p2_core2 && l2 == l3)
l3 = 0;
l1 *= 1024;
l2 *= 1024;
l3 *= 1024;
}
inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
if(max_std_funcs>=4)
queryCacheSizes_intel_direct(l1,l2,l3);
else
queryCacheSizes_intel_codes(l1,l2,l3);
}
inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
int abcd[4];
abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
EIGEN_CPUID(abcd,0x80000005,0);
l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
EIGEN_CPUID(abcd,0x80000006,0);
  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = l2 cache size in KB
  l3 = ((abcd[3] & 0xFFFC0000) >> 18) * 512 * 1024; // D[31:18] = l3 cache size in 512KB units
}
#endif
/** \internal
* Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
int abcd[4];
const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
// identify the CPU vendor
EIGEN_CPUID(abcd,0x0,0);
int max_std_funcs = abcd[1];
if(cpuid_is_vendor(abcd,GenuineIntel))
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
queryCacheSizes_amd(l1,l2,l3);
else
// by default let's use Intel's API
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
// here is the list of other vendors:
// ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
// ||cpuid_is_vendor(abcd,"CyrixInstead")
// ||cpuid_is_vendor(abcd,"CentaurHauls")
// ||cpuid_is_vendor(abcd,"GenuineTMx86")
// ||cpuid_is_vendor(abcd,"TransmetaCPU")
// ||cpuid_is_vendor(abcd,"RiseRiseRise")
// ||cpuid_is_vendor(abcd,"Geode by NSC")
// ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
// ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
// ||cpuid_is_vendor(abcd,"NexGenDriven")
#else
l1 = l2 = l3 = -1;
#endif
}
/** \internal
* \returns the size in Bytes of the L1 data cache */
inline int queryL1CacheSize()
{
int l1(-1), l2, l3;
queryCacheSizes(l1,l2,l3);
return l1;
}
/** \internal
 * \returns the size in Bytes of the L2 or L3 cache if the latter is present */
inline int queryTopLevelCacheSize()
{
int l1, l2(-1), l3(-1);
queryCacheSizes(l1,l2,l3);
return (std::max)(l2,l3);
}
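// Illustrative usage sketch (not part of Eigen itself): querying the detected
// cache sizes from client code; each value may be -1 when EIGEN_CPUID is not
// available on the target platform.
//   int l1, l2, l3;
//   Eigen::internal::queryCacheSizes(l1, l2, l3);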
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_MEMORY_H
| 39,712 | 39.606339 | 207 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/Meta.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_META_H
#define EIGEN_META_H
#if defined(__CUDA_ARCH__)
#include <cfloat>
#include <math_constants.h>
#endif
#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L
#include <cstdint>
#endif
namespace Eigen {
typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
/**
* \brief The Index type as used for the API.
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \sa \blank \ref TopicPreprocessorDirectives, StorageIndex.
*/
typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index;
namespace internal {
/** \internal
* \file Meta.h
* This file contains generic metaprogramming classes which are not specifically related to Eigen.
* \note In case you wonder, yes we're aware that Boost already provides all these features,
* we however don't want to add a dependency to Boost.
*/
// Only recent versions of ICC complain about using ptrdiff_t to hold pointers,
// and older versions do not provide *intptr_t types.
#if EIGEN_COMP_ICC>=1600 && __cplusplus >= 201103L
typedef std::intptr_t IntPtr;
typedef std::uintptr_t UIntPtr;
#else
typedef std::ptrdiff_t IntPtr;
typedef std::size_t UIntPtr;
#endif
struct true_type { enum { value = 1 }; };
struct false_type { enum { value = 0 }; };
template<bool Condition, typename Then, typename Else>
struct conditional { typedef Then type; };
template<typename Then, typename Else>
struct conditional <false, Then, Else> { typedef Else type; };
template<typename T, typename U> struct is_same { enum { value = 0 }; };
template<typename T> struct is_same<T,T> { enum { value = 1 }; };
template<typename T> struct remove_reference { typedef T type; };
template<typename T> struct remove_reference<T&> { typedef T type; };
template<typename T> struct remove_pointer { typedef T type; };
template<typename T> struct remove_pointer<T*> { typedef T type; };
template<typename T> struct remove_pointer<T*const> { typedef T type; };
template <class T> struct remove_const { typedef T type; };
template <class T> struct remove_const<const T> { typedef T type; };
template <class T> struct remove_const<const T[]> { typedef T type[]; };
template <class T, unsigned int Size> struct remove_const<const T[Size]> { typedef T type[Size]; };
template<typename T> struct remove_all { typedef T type; };
template<typename T> struct remove_all<const T> { typedef typename remove_all<T>::type type; };
template<typename T> struct remove_all<T const&> { typedef typename remove_all<T>::type type; };
template<typename T> struct remove_all<T&> { typedef typename remove_all<T>::type type; };
template<typename T> struct remove_all<T const*> { typedef typename remove_all<T>::type type; };
template<typename T> struct remove_all<T*> { typedef typename remove_all<T>::type type; };
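// Illustrative only: remove_all strips any combination of const, reference and
// pointer qualifiers, e.g.
//   is_same<remove_all<const float*&>::type, float>::value == 1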
template<typename T> struct is_arithmetic { enum { value = false }; };
template<> struct is_arithmetic<float> { enum { value = true }; };
template<> struct is_arithmetic<double> { enum { value = true }; };
template<> struct is_arithmetic<long double> { enum { value = true }; };
template<> struct is_arithmetic<bool> { enum { value = true }; };
template<> struct is_arithmetic<char> { enum { value = true }; };
template<> struct is_arithmetic<signed char> { enum { value = true }; };
template<> struct is_arithmetic<unsigned char> { enum { value = true }; };
template<> struct is_arithmetic<signed short> { enum { value = true }; };
template<> struct is_arithmetic<unsigned short>{ enum { value = true }; };
template<> struct is_arithmetic<signed int> { enum { value = true }; };
template<> struct is_arithmetic<unsigned int> { enum { value = true }; };
template<> struct is_arithmetic<signed long> { enum { value = true }; };
template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
template<typename T> struct is_integral { enum { value = false }; };
template<> struct is_integral<bool> { enum { value = true }; };
template<> struct is_integral<char> { enum { value = true }; };
template<> struct is_integral<signed char> { enum { value = true }; };
template<> struct is_integral<unsigned char> { enum { value = true }; };
template<> struct is_integral<signed short> { enum { value = true }; };
template<> struct is_integral<unsigned short> { enum { value = true }; };
template<> struct is_integral<signed int> { enum { value = true }; };
template<> struct is_integral<unsigned int> { enum { value = true }; };
template<> struct is_integral<signed long> { enum { value = true }; };
template<> struct is_integral<unsigned long> { enum { value = true }; };
template <typename T> struct add_const { typedef const T type; };
template <typename T> struct add_const<T&> { typedef T& type; };
template <typename T> struct is_const { enum { value = 0 }; };
template <typename T> struct is_const<T const> { enum { value = 1 }; };
template<typename T> struct add_const_on_value_type { typedef const T type; };
template<typename T> struct add_const_on_value_type<T&> { typedef T const& type; };
template<typename T> struct add_const_on_value_type<T*> { typedef T const* type; };
template<typename T> struct add_const_on_value_type<T* const> { typedef T const* const type; };
template<typename T> struct add_const_on_value_type<T const* const> { typedef T const* const type; };
template<typename From, typename To>
struct is_convertible_impl
{
private:
struct any_conversion
{
template <typename T> any_conversion(const volatile T&);
template <typename T> any_conversion(T&);
};
struct yes {int a[1];};
struct no {int a[2];};
static yes test(const To&, int);
static no test(any_conversion, ...);
public:
static From ms_from;
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning ( disable : 2259 )
#endif
enum { value = sizeof(test(ms_from, 0))==sizeof(yes) };
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
};
template<typename From, typename To>
struct is_convertible
{
enum { value = is_convertible_impl<typename remove_all<From>::type,
typename remove_all<To >::type>::value };
};
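// Illustrative sanity checks (not part of Eigen):
//   is_convertible<int, double>::value == 1
//   is_convertible<int*, double*>::value == 0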
/** \internal Allows to enable/disable an overload
* according to a compile time condition.
*/
template<bool Condition, typename T=void> struct enable_if;
template<typename T> struct enable_if<true,T>
{ typedef T type; };
#if defined(__CUDA_ARCH__)
#if !defined(__FLT_EPSILON__)
#define __FLT_EPSILON__ FLT_EPSILON
#define __DBL_EPSILON__ DBL_EPSILON
#endif
namespace device {
template<typename T> struct numeric_limits
{
EIGEN_DEVICE_FUNC
static T epsilon() { return 0; }
static T (max)() { assert(false && "Highest not supported for this type"); }
static T (min)() { assert(false && "Lowest not supported for this type"); }
static T infinity() { assert(false && "Infinity not supported for this type"); }
static T quiet_NaN() { assert(false && "quiet_NaN not supported for this type"); }
};
template<> struct numeric_limits<float>
{
EIGEN_DEVICE_FUNC
static float epsilon() { return __FLT_EPSILON__; }
EIGEN_DEVICE_FUNC
static float (max)() { return CUDART_MAX_NORMAL_F; }
EIGEN_DEVICE_FUNC
static float (min)() { return FLT_MIN; }
EIGEN_DEVICE_FUNC
static float infinity() { return CUDART_INF_F; }
EIGEN_DEVICE_FUNC
static float quiet_NaN() { return CUDART_NAN_F; }
};
template<> struct numeric_limits<double>
{
EIGEN_DEVICE_FUNC
static double epsilon() { return __DBL_EPSILON__; }
EIGEN_DEVICE_FUNC
static double (max)() { return DBL_MAX; }
EIGEN_DEVICE_FUNC
static double (min)() { return DBL_MIN; }
EIGEN_DEVICE_FUNC
static double infinity() { return CUDART_INF; }
EIGEN_DEVICE_FUNC
static double quiet_NaN() { return CUDART_NAN; }
};
template<> struct numeric_limits<int>
{
EIGEN_DEVICE_FUNC
static int epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static int (max)() { return INT_MAX; }
EIGEN_DEVICE_FUNC
static int (min)() { return INT_MIN; }
};
template<> struct numeric_limits<unsigned int>
{
EIGEN_DEVICE_FUNC
static unsigned int epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static unsigned int (max)() { return UINT_MAX; }
EIGEN_DEVICE_FUNC
static unsigned int (min)() { return 0; }
};
template<> struct numeric_limits<long>
{
EIGEN_DEVICE_FUNC
static long epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static long (max)() { return LONG_MAX; }
EIGEN_DEVICE_FUNC
static long (min)() { return LONG_MIN; }
};
template<> struct numeric_limits<unsigned long>
{
EIGEN_DEVICE_FUNC
static unsigned long epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static unsigned long (max)() { return ULONG_MAX; }
EIGEN_DEVICE_FUNC
static unsigned long (min)() { return 0; }
};
template<> struct numeric_limits<long long>
{
EIGEN_DEVICE_FUNC
static long long epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static long long (max)() { return LLONG_MAX; }
EIGEN_DEVICE_FUNC
static long long (min)() { return LLONG_MIN; }
};
template<> struct numeric_limits<unsigned long long>
{
EIGEN_DEVICE_FUNC
static unsigned long long epsilon() { return 0; }
EIGEN_DEVICE_FUNC
static unsigned long long (max)() { return ULLONG_MAX; }
EIGEN_DEVICE_FUNC
static unsigned long long (min)() { return 0; }
};
}
#endif
/** \internal
 * A base class to disable the default copy ctor and copy assignment operator.
*/
class noncopyable
{
EIGEN_DEVICE_FUNC noncopyable(const noncopyable&);
EIGEN_DEVICE_FUNC const noncopyable& operator=(const noncopyable&);
protected:
EIGEN_DEVICE_FUNC noncopyable() {}
EIGEN_DEVICE_FUNC ~noncopyable() {}
};
/** \internal
* Convenient struct to get the result type of a unary or binary functor.
*
 * It supports both the current STL mechanism (using the result_type member) and the
 * next STL generation (using a templated result member).
 * If neither of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack.
*/
#if EIGEN_HAS_STD_RESULT_OF
template<typename T> struct result_of {
typedef typename std::result_of<T>::type type1;
typedef typename remove_all<type1>::type type;
};
#else
template<typename T> struct result_of { };
struct has_none {int a[1];};
struct has_std_result_type {int a[2];};
struct has_tr1_result {int a[3];};
template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
struct unary_result_of_select {typedef typename internal::remove_all<ArgType>::type type;};
template<typename Func, typename ArgType>
struct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
template<typename Func, typename ArgType>
struct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
template<typename Func, typename ArgType>
struct result_of<Func(ArgType)> {
template<typename T>
static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
template<typename T>
static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
static has_none testFunctor(...);
// note that the following indirection is needed for gcc-3.3
enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
typedef typename unary_result_of_select<Func, ArgType, FunctorType>::type type;
};
template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>
struct binary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};
template<typename Func, typename ArgType0, typename ArgType1>
struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
{typedef typename Func::result_type type;};
template<typename Func, typename ArgType0, typename ArgType1>
struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
template<typename Func, typename ArgType0, typename ArgType1>
struct result_of<Func(ArgType0,ArgType1)> {
template<typename T>
static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
template<typename T>
static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
static has_none testFunctor(...);
// note that the following indirection is needed for gcc-3.3
enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
};
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2, int SizeOf=sizeof(has_none)>
struct ternary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
struct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>
{typedef typename Func::result_type type;};
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
struct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>
{typedef typename Func::template result<Func(ArgType0,ArgType1,ArgType2)>::type type;};
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
struct result_of<Func(ArgType0,ArgType1,ArgType2)> {
template<typename T>
static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
template<typename T>
static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1,ArgType2)>::type const * = 0);
static has_none testFunctor(...);
// note that the following indirection is needed for gcc-3.3
enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
typedef typename ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, FunctorType>::type type;
};
#endif
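// Illustrative sketch: for a functor exposing the classic result_type member,
// as std::negate<double> historically does (requires <functional>; the member
// was removed in C++20), result_of<std::negate<double>(double)>::type is double.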
struct meta_yes { char a[1]; };
struct meta_no { char a[2]; };
// Check whether T::ReturnType does exist
template <typename T>
struct has_ReturnType
{
template <typename C> static meta_yes testFunctor(typename C::ReturnType const *);
template <typename C> static meta_no testFunctor(...);
enum { value = sizeof(testFunctor<T>(0)) == sizeof(meta_yes) };
};
template<typename T> const T* return_ptr();
template <typename T, typename IndexType=Index>
struct has_nullary_operator
{
template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()())>0)>::type * = 0);
static meta_no testFunctor(...);
enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
};
template <typename T, typename IndexType=Index>
struct has_unary_operator
{
template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0)))>0)>::type * = 0);
static meta_no testFunctor(...);
enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
};
template <typename T, typename IndexType=Index>
struct has_binary_operator
{
template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0),IndexType(0)))>0)>::type * = 0);
static meta_no testFunctor(...);
enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };
};
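// Illustrative check with a hypothetical functor (not part of Eigen):
//   struct Iota { int operator()(Index i) const { return int(i); } };
// has_unary_operator<Iota>::value == 1, while has_binary_operator<Iota>::value == 0
// because Iota has no operator()(Index,Index).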
/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
* Usage example: \code meta_sqrt<1023>::ret \endcode
*/
template<int Y,
int InfX = 0,
int SupX = ((Y==1) ? 1 : Y/2),
bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
// use ?: instead of || just to shut up a stupid gcc 4.3 warning
class meta_sqrt
{
enum {
MidX = (InfX+SupX)/2,
TakeInf = MidX*MidX > Y ? 1 : 0,
NewInf = int(TakeInf) ? InfX : int(MidX),
NewSup = int(TakeInf) ? int(MidX) : SupX
};
public:
enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };
};
template<int Y, int InfX, int SupX>
class meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
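// Illustrative values: meta_sqrt<1023>::ret == 31 and meta_sqrt<1024>::ret == 32.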
/** \internal Computes the least common multiple of two positive integers A and B
* at compile-time. It implements a naive algorithm testing all multiples of A.
* It thus works better if A>=B.
*/
template<int A, int B, int K=1, bool Done = ((A*K)%B)==0>
struct meta_least_common_multiple
{
enum { ret = meta_least_common_multiple<A,B,K+1>::ret };
};
template<int A, int B, int K>
struct meta_least_common_multiple<A,B,K,true>
{
enum { ret = A*K };
};
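// Illustrative value: meta_least_common_multiple<6,4>::ret == 12 (found at K==2).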
/** \internal determines whether the product of two numeric types is allowed and what the return type is */
template<typename T, typename U> struct scalar_product_traits
{
enum { Defined = 0 };
};
// FIXME quick workaround for a current limitation of result_of
// template<typename Scalar, typename ArgType0, typename ArgType1>
// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
// };
} // end namespace internal
namespace numext {
#if defined(__CUDA_ARCH__)
template<typename T> EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }
#else
template<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }
#endif
#if defined(__CUDA_ARCH__)
using internal::device::numeric_limits;
#else
using std::numeric_limits;
#endif
// Integer division with rounding up.
// T is assumed to be an integer type with a>=0, and b>0
template<typename T>
T div_ceil(const T &a, const T &b)
{
return (a+b-1) / b;
}
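// Illustrative values: div_ceil(7,3) == 3 and div_ceil(6,3) == 2.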
} // end namespace numext
} // end namespace Eigen
#endif // EIGEN_META_H
| 18,481 | 36.488844 | 162 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/NonMPL2.h
|
#ifdef EIGEN_MPL2_ONLY
#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode
#endif
| 85 | 20.5 | 54 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/ReenableStupidWarnings.h
|
#ifdef EIGEN_WARNINGS_DISABLED
#undef EIGEN_WARNINGS_DISABLED
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#ifdef _MSC_VER
#pragma warning( pop )
#elif defined __INTEL_COMPILER
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
#elif defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic pop
#endif
#if defined __NVCC__
// Don't reenable the diagnostic messages, as it turns out these messages need
// to be disabled at the point of the template instantiation (i.e. the user code)
// otherwise they'll be triggered by nvcc.
// #pragma diag_default code_is_unreachable
// #pragma diag_default initialization_not_reachable
// #pragma diag_default 2651
// #pragma diag_default 2653
#endif
#endif
#endif // EIGEN_WARNINGS_DISABLED
| 809 | 27.928571 | 83 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/StaticAssert.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STATIC_ASSERT_H
#define EIGEN_STATIC_ASSERT_H
/* Some notes on Eigen's static assertion mechanism:
*
* - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
* expression, and MSG an enum listed in struct internal::static_assertion<true>
*
* - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
* in that case, the static assertion is converted to the following runtime assert:
* eigen_assert(CONDITION && "MSG")
*
* - currently EIGEN_STATIC_ASSERT can only be used in function scope
*
*/
#ifndef EIGEN_NO_STATIC_ASSERT
#if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600))
// if native static_assert is enabled, let's use it
#define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
#else // not CXX0X
namespace Eigen {
namespace internal {
template<bool condition>
struct static_assertion {};
template<>
struct static_assertion<true>
{
enum {
YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
OUT_OF_RANGE_ACCESS,
YOU_MADE_A_PROGRAMMING_MISTAKE,
EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
NUMERIC_TYPE_MUST_BE_REAL,
COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
INVALID_MATRIX_PRODUCT,
INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
INVALID_MATRIX_TEMPLATE_PARAMETERS,
INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
YOU_ALREADY_SPECIFIED_THIS_STRIDE,
INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS,
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
STORAGE_LAYOUT_DOES_NOT_MATCH,
EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE,
THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS,
MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY,
THIS_TYPE_IS_NOT_SUPPORTED,
STORAGE_KIND_MUST_MATCH,
STORAGE_INDEX_MUST_MATCH,
CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY
};
};
} // end namespace internal
} // end namespace Eigen
// Specialized implementation for MSVC to avoid "conditional
// expression is constant" warnings. This implementation doesn't
// appear to work under GCC, hence the multiple implementations.
#if EIGEN_COMP_MSVC
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
{Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
#else
  // In some cases clang interprets bool(CONDITION) as a function declaration
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
if (Eigen::internal::static_assertion<static_cast<bool>(CONDITION)>::MSG) {}
#endif
#endif // not CXX0X
#else // EIGEN_NO_STATIC_ASSERT
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
#endif // EIGEN_NO_STATIC_ASSERT
// static assertion failing if the type \a TYPE is not a vector type
#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
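// Illustrative usage sketch (hypothetical user function, not part of Eigen):
//   template<typename Derived>
//   void normalize_in_place(Eigen::MatrixBase<Derived>& v)
//   {
//     EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
//     v /= v.norm();
//   }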
// static assertion failing if the type \a TYPE is not fixed-size
#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
// static assertion failing if the type \a TYPE is not dynamic-size
#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
// static assertion failing if the type \a TYPE is not a vector type of the given size
#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
// static assertion failing if the type \a TYPE is not a vector type of the given size
#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
EIGEN_STATIC_ASSERT( \
(int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
|| int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
|| int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
( \
(int(Eigen::internal::size_of_xpr_at_compile_time<TYPE0>::ret)==0 && int(Eigen::internal::size_of_xpr_at_compile_time<TYPE1>::ret)==0) \
|| (\
(int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
|| int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
|| int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
&& (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
|| int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
|| int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
) \
)
#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
EIGEN_STATIC_ASSERT( \
EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
(TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
EIGEN_STATIC_ASSERT(Eigen::internal::is_lvalue<Derived>::value, \
THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived>::XprKind, ArrayXpr>::value), \
THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived1>::XprKind, \
typename Eigen::internal::traits<Derived2>::XprKind \
>::value), \
YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
// Check that a cost value is positive, and that it stays within a reasonable range
// TODO this check could be enabled for internal debugging only
#define EIGEN_INTERNAL_CHECK_COST_VALUE(C) \
EIGEN_STATIC_ASSERT((C)>=0 && (C)<=HugeCost*HugeCost, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE);
#endif // EIGEN_STATIC_ASSERT_H
| 10,284 | 46.396313 | 146 |
h
|
abess
|
abess-master/python/include/Eigen/src/Core/util/XprHelper.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_XPRHELPER_H
#define EIGEN_XPRHELPER_H
// just a workaround because GCC seems to not really like empty structs
// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
// so currently we simply disable this optimization for gcc 4.3
#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
#define EIGEN_EMPTY_STRUCT_CTOR(X) \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
#else
#define EIGEN_EMPTY_STRUCT_CTOR(X)
#endif
namespace Eigen {
namespace internal {
template<typename IndexDest, typename IndexSrc>
EIGEN_DEVICE_FUNC
inline IndexDest convert_index(const IndexSrc& idx) {
// for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
  eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value too big for target type");
return IndexDest(idx);
}
// promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
// expression * scalar
// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
// Then the logic is as follows:
// - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
// - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND that this does not imply a float to integer conversion.
// - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion.
// - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
template<typename ExprScalar,typename T, bool IsSupported>
struct promote_scalar_arg;
template<typename S,typename T>
struct promote_scalar_arg<S,T,true>
{
typedef T type;
};
// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
template<typename ExprScalar,typename T,typename PromotedType,
bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
struct promote_scalar_arg_unsupported;
// Start recursion with NumTraits<ExprScalar>::Literal
template<typename S,typename T>
struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};
// We found a match!
template<typename S,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
{
typedef PromotedType type;
};
// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
// so let's try to promote to ExprScalar
template<typename ExprScalar,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
: promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
{};
// Unsafe real-to-integer, let's stop.
template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};
// T is not even convertible to ExprScalar, let's stop.
template<typename S,typename T>
struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
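// Illustrative outcome (assuming the operation is reported as unsupported):
// with ExprScalar = std::complex<float> and T = double, double is convertible
// to NumTraits<std::complex<float> >::Literal (i.e. float) without a
// float-to-integer conversion, so the scalar argument is promoted to float.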
// Classes inheriting no_assignment_operator don't generate a default operator=.
class no_assignment_operator
{
private:
no_assignment_operator& operator=(const no_assignment_operator&);
};
/** \internal return the index type with the largest number of bits */
template<typename I1, typename I2>
struct promote_index_type
{
typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
};
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
* can be accessed using value() and setValue().
* Otherwise, this class is an empty structure and value() just returns the template parameter Value.
*/
template<typename T, int Value> class variable_if_dynamic
{
public:
EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
};
template<typename T> class variable_if_dynamic<T, Dynamic>
{
T m_value;
EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
};
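// Illustrative behaviour (not part of Eigen):
//   variable_if_dynamic<Index, 3> r(3);       // empty struct, r.value() == 3
//   variable_if_dynamic<Index, Dynamic> s(5); // stores 5, s.value() == 5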
/** \internal like variable_if_dynamic but for DynamicIndex
*/
template<typename T, int Value> class variable_if_dynamicindex
{
public:
EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}
};
template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
{
T m_value;
EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
};
template<typename T> struct functor_traits
{
enum
{
Cost = 10,
PacketAccess = false,
IsRepeatable = false
};
};
template<typename T> struct packet_traits;
template<typename T> struct unpacket_traits
{
typedef T type;
typedef T half;
enum
{
size = 1,
alignment = 1
};
};
template<int Size, typename PacketType,
bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
struct find_best_packet_helper;
template< int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,true>
{
typedef PacketType type;
};
template<int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,false>
{
typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
};
template<typename T, int Size>
struct find_best_packet
{
typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
template<int ArrayBytes, int AlignmentBytes,
bool Match = bool((ArrayBytes%AlignmentBytes)==0),
bool TryHalf = bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
struct compute_default_alignment_helper
{
enum { value = 0 };
};
template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
{
enum { value = AlignmentBytes };
};
template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
{
  // current packet too large, try with a half-packet
enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
};
#else
// If static alignment is disabled, no need to bother.
// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper
{
enum { value = 0 };
};
#endif
template<typename T, int Size> struct compute_default_alignment {
enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
};
template<typename T> struct compute_default_alignment<T,Dynamic> {
enum { value = EIGEN_MAX_ALIGN_BYTES };
};
template<typename _Scalar, int _Rows, int _Cols,
int _Options = AutoAlign |
( (_Rows==1 && _Cols!=1) ? RowMajor
: (_Cols==1 && _Rows!=1) ? ColMajor
: EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
int _MaxRows = _Rows,
int _MaxCols = _Cols
> class make_proper_matrix_type
{
enum {
IsColVector = _Cols==1 && _Rows!=1,
IsRowVector = _Rows==1 && _Cols!=1,
Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
: IsRowVector ? (_Options | RowMajor) & ~ColMajor
: _Options
};
public:
typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
};
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
class compute_matrix_flags
{
enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
public:
// FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
// and then propagate this information to the evaluator's flags.
// However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
};
template<int _Rows, int _Cols> struct size_at_compile_time
{
enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
};
template<typename XprType> struct size_of_xpr_at_compile_time
{
enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
};
/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
* whereas eval is a const reference in the case of a matrix
*/
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
template<typename T> struct plain_matrix_type<T,Dense>
{
typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
};
template<typename T> struct plain_matrix_type<T,DiagonalShape>
{
typedef typename T::PlainObject type;
};
template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
{
typedef Matrix<typename traits<T>::Scalar,
traits<T>::RowsAtCompileTime,
traits<T>::ColsAtCompileTime,
AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
traits<T>::MaxRowsAtCompileTime,
traits<T>::MaxColsAtCompileTime
> type;
};
template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
{
typedef Array<typename traits<T>::Scalar,
traits<T>::RowsAtCompileTime,
traits<T>::ColsAtCompileTime,
AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
traits<T>::MaxRowsAtCompileTime,
traits<T>::MaxColsAtCompileTime
> type;
};
/* eval : the return type of eval(). For matrices, this is just a const reference
* in order to avoid a useless copy
*/
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
template<typename T> struct eval<T,Dense>
{
typedef typename plain_matrix_type<T>::type type;
// typedef typename T::PlainObject type;
// typedef T::Matrix<typename traits<T>::Scalar,
// traits<T>::RowsAtCompileTime,
// traits<T>::ColsAtCompileTime,
// AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
// traits<T>::MaxRowsAtCompileTime,
// traits<T>::MaxColsAtCompileTime
// > type;
};
template<typename T> struct eval<T,DiagonalShape>
{
typedef typename plain_matrix_type<T>::type type;
};
// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};
/* similar to plain_matrix_type, but using the evaluator's Flags */
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;
template<typename T>
struct plain_object_eval<T,Dense>
{
typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
};
/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
*/
template<typename T> struct plain_matrix_type_column_major
{
enum { Rows = traits<T>::RowsAtCompileTime,
Cols = traits<T>::ColsAtCompileTime,
MaxRows = traits<T>::MaxRowsAtCompileTime,
MaxCols = traits<T>::MaxColsAtCompileTime
};
typedef Matrix<typename traits<T>::Scalar,
Rows,
Cols,
(MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
MaxRows,
MaxCols
> type;
};
/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
*/
template<typename T> struct plain_matrix_type_row_major
{
enum { Rows = traits<T>::RowsAtCompileTime,
Cols = traits<T>::ColsAtCompileTime,
MaxRows = traits<T>::MaxRowsAtCompileTime,
MaxCols = traits<T>::MaxColsAtCompileTime
};
typedef Matrix<typename traits<T>::Scalar,
Rows,
Cols,
(MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
MaxRows,
MaxCols
> type;
};
/** \internal The reference selector for template expressions. The idea is that we don't
 * need to use references for expressions since they are lightweight proxy
* objects which should generate no copying overhead. */
template <typename T>
struct ref_selector
{
typedef typename conditional<
bool(traits<T>::Flags & NestByRefBit),
T const&,
const T
>::type type;
typedef typename conditional<
bool(traits<T>::Flags & NestByRefBit),
T &,
T
>::type non_const_type;
};
/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
template<typename T1, typename T2>
struct transfer_constness
{
typedef typename conditional<
bool(internal::is_const<T1>::value),
typename internal::add_const_on_value_type<T2>::type,
T2
>::type type;
};
// However, we still need a mechanism to detect whether an expression which is evaluated multiple times
// has to be evaluated into a temporary.
// That's the purpose of this new nested_eval helper:
/** \internal Determines how a given expression should be nested when evaluated multiple times.
* For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
 * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is and
 * evaluating that expression b+c into a temporary variable d, nesting d so that the resulting expression is
 * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
* many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
*
* \tparam T the type of the expression being nested.
* \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
* \tparam PlainObject the type of the temporary if needed.
*/
template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
{
enum {
ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
    CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluates itself into a temporary?
      // Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
      // This situation is already taken care of by the EvalBeforeNestingBit flag, which is turned ON
      // for all evaluators creating a temporary. This flag is then propagated by the parent evaluators.
// Another solution could be to count the number of temps?
NAsInteger = n == Dynamic ? HugeCost : n,
CostEval = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
CostNoEval = NAsInteger * CoeffReadCost,
Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
};
typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
};
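// Illustrative consequence (hypothetical types): for S = decltype(b+c) with
// dense vectors b and c, nested_eval<S,1>::type keeps the sum expression nested
// by reference, whereas for a large n the evaluation into a PlainObject
// temporary becomes cheaper and is selected instead.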
template<typename T>
EIGEN_DEVICE_FUNC
inline T* const_cast_ptr(const T* ptr)
{
return const_cast<T*>(ptr);
}
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
struct dense_xpr_base
{
/* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
};
template<typename Derived>
struct dense_xpr_base<Derived, MatrixXpr>
{
typedef MatrixBase<Derived> type;
};
template<typename Derived>
struct dense_xpr_base<Derived, ArrayXpr>
{
typedef ArrayBase<Derived> type;
};
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
struct generic_xpr_base;
template<typename Derived, typename XprKind>
struct generic_xpr_base<Derived, XprKind, Dense>
{
typedef typename dense_xpr_base<Derived,XprKind>::type type;
};
template<typename XprType, typename CastType> struct cast_return_type
{
typedef typename XprType::Scalar CurrentScalarType;
typedef typename remove_all<CastType>::type _CastType;
typedef typename _CastType::Scalar NewScalarType;
typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
const XprType&,CastType>::type type;
};
template <typename A, typename B> struct promote_storage_type;
template <typename A> struct promote_storage_type<A,A>
{
typedef A ret;
};
template <typename A> struct promote_storage_type<A, const A>
{
typedef A ret;
};
template <typename A> struct promote_storage_type<const A, A>
{
typedef A ret;
};
/** \internal Specify the "storage kind" of applying a coefficient-wise
 * binary operation between two expressions of kinds A and B respectively.
 * The template parameter Functor permits specializing the resulting storage kind with respect to
* the functor.
* The default rules are as follows:
* \code
* A op A -> A
* A op dense -> dense
* dense op B -> dense
* sparse op dense -> sparse
* dense op sparse -> sparse
* \endcode
*/
template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;
template <typename A, typename Functor> struct cwise_promote_storage_type<A,A,Functor> { typedef A ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Dense,Functor> { typedef Dense ret; };
template <typename A, typename Functor> struct cwise_promote_storage_type<A,Dense,Functor> { typedef Dense ret; };
template <typename B, typename Functor> struct cwise_promote_storage_type<Dense,B,Functor> { typedef Dense ret; };
template <typename Functor> struct cwise_promote_storage_type<Sparse,Dense,Functor> { typedef Sparse ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Sparse,Functor> { typedef Sparse ret; };
template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
enum { value = LhsOrder };
};
template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
template <int Order> struct cwise_promote_storage_order<Sparse,Sparse,Order,Order> { enum { value = Order }; };
/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
 * The template parameter ProductTag permits specializing the resulting storage kind with respect to
* some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
* The default rules are as follows:
* \code
* K * K -> K
* dense * K -> dense
* K * dense -> dense
* diag * K -> K
* K * diag -> K
* Perm * K -> K
* K * Perm -> K
* \endcode
*/
template <typename A, typename B, int ProductTag> struct product_promote_storage_type;
template <typename A, int ProductTag> struct product_promote_storage_type<A, A, ProductTag> { typedef A ret;};
template <int ProductTag> struct product_promote_storage_type<Dense, Dense, ProductTag> { typedef Dense ret;};
template <typename A, int ProductTag> struct product_promote_storage_type<A, Dense, ProductTag> { typedef Dense ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<Dense, B, ProductTag> { typedef Dense ret; };
template <typename A, int ProductTag> struct product_promote_storage_type<A, DiagonalShape, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, DiagonalShape, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<DiagonalShape, Dense, ProductTag> { typedef Dense ret; };
template <typename A, int ProductTag> struct product_promote_storage_type<A, PermutationStorage, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, PermutationStorage, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Dense, ProductTag> { typedef Dense ret; };
/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
* \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
*/
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_row_type
{
typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixRowType,
ArrayRowType
>::type type;
};
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_col_type
{
typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixColType,
ArrayColType
>::type type;
};
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_diag_type
{
enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
};
typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
typedef typename conditional<
is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
MatrixDiagType,
ArrayDiagType
>::type type;
};
template<typename Expr,typename Scalar = typename Expr::Scalar>
struct plain_constant_type
{
enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };
typedef Array<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;
typedef Matrix<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;
typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
};
template<typename ExpressionType>
struct is_lvalue
{
enum { value = (!bool(is_const<ExpressionType>::value)) &&
bool(traits<ExpressionType>::Flags & LvalueBit) };
};
template<typename T> struct is_diagonal
{ enum { ret = false }; };
template<typename T> struct is_diagonal<DiagonalBase<T> >
{ enum { ret = true }; };
template<typename T> struct is_diagonal<DiagonalWrapper<T> >
{ enum { ret = true }; };
template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
template<typename T1, typename T2>
bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)
{
return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}
template<typename T1, typename T2>
bool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)
{
return false;
}
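// Illustrative behaviour: two Map<MatrixXf> objects viewing the same buffer
// with identical strides compare equal; expressions without direct access
// (e.g. a CwiseBinaryOp) always yield false.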
// Internal helper defining the cost of a scalar division for the type T.
// The default heuristic can be specialized for each scalar type and architecture.
template<typename T,bool Vectorized=false,typename EnableIf = void>
struct scalar_div_cost {
enum { value = 8*NumTraits<T>::MulCost };
};
template<typename T,bool Vectorized>
struct scalar_div_cost<std::complex<T>, Vectorized> {
enum { value = 2*scalar_div_cost<T>::value
+ 6*NumTraits<T>::MulCost
+ 3*NumTraits<T>::AddCost
};
};
template<bool Vectorized>
struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
template<bool Vectorized>
struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
#ifdef EIGEN_DEBUG_ASSIGN
std::string demangle_traversal(int t)
{
if(t==DefaultTraversal) return "DefaultTraversal";
if(t==LinearTraversal) return "LinearTraversal";
if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
return "?";
}
std::string demangle_unrolling(int t)
{
if(t==NoUnrolling) return "NoUnrolling";
if(t==InnerUnrolling) return "InnerUnrolling";
if(t==CompleteUnrolling) return "CompleteUnrolling";
return "?";
}
std::string demangle_flags(int f)
{
std::string res;
if(f&RowMajorBit) res += " | RowMajor";
if(f&PacketAccessBit) res += " | Packet";
if(f&LinearAccessBit) res += " | Linear";
if(f&LvalueBit) res += " | Lvalue";
if(f&DirectAccessBit) res += " | Direct";
if(f&NestByRefBit) res += " | NestByRef";
if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";
return res;
}
#endif
} // end namespace internal
/** \class ScalarBinaryOpTraits
* \ingroup Core_Module
*
* \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
*
 * This class makes it possible to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
 *
 * For instance, let \c U1, \c U2 and \c U3 be three user-defined scalar types for which most operations between instances of \c U1 and \c U2 return a \c U3.
 * You can let %Eigen know that by defining:
\code
template<typename BinaryOp>
struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType; };
template<typename BinaryOp>
struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType; };
\endcode
* You can then explicitly disable some particular operations to get more explicit error messages:
\code
template<>
struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
\endcode
 * Or customize the return type for an individual operation:
\code
template<>
struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
\endcode
*
* By default, the following generic combinations are supported:
<table class="manual">
<tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
<tr ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
<tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
<tr ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
</table>
*
* \sa CwiseBinaryOp
*/
template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
struct ScalarBinaryOpTraits
#ifndef EIGEN_PARSED_BY_DOXYGEN
// for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
: internal::scalar_product_traits<ScalarA,ScalarB>
#endif // EIGEN_PARSED_BY_DOXYGEN
{};
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,T,BinaryOp>
{
typedef T ReturnType;
};
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
{
typedef T ReturnType;
};
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
{
typedef T ReturnType;
};
// For Matrix * Permutation
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,void,BinaryOp>
{
typedef T ReturnType;
};
// For Permutation * Matrix
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<void,T,BinaryOp>
{
typedef T ReturnType;
};
// for Permutation*Permutation
template<typename BinaryOp>
struct ScalarBinaryOpTraits<void,void,BinaryOp>
{
typedef void ReturnType;
};
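// Compile-time sketch (added for illustration): given the specializations
// above, mixing a complex type with its real counterpart is allowed and
// yields the complex type, e.g.
//   ScalarBinaryOpTraits<std::complex<float>, float>::ReturnType
//   is std::complex<float>,
// whereas ScalarBinaryOpTraits<float, double> defines no ReturnType at all;
// the EIGEN_CHECK_BINARY_COMPATIBILIY macro below turns that absence into an
// explicit static assertion failure instead of a cryptic compiler error.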
// We require Lhs and Rhs to have "compatible" scalar types.
// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
} // end namespace Eigen
#endif // EIGEN_XPRHELPER_H
| 34,198 | 40.604623 | 189 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/ComplexEigenSolver.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
#define EIGEN_COMPLEX_EIGEN_SOLVER_H
#include "./ComplexSchur.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class ComplexEigenSolver
*
* \brief Computes eigenvalues and eigenvectors of general complex matrices
*
* \tparam _MatrixType the type of the matrix of which we are
* computing the eigendecomposition; this is expected to be an
* instantiation of the Matrix class template.
*
* The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
* \f$. If \f$ D \f$ is a diagonal matrix with the eigenvalues on
* the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
* its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
* almost always invertible, in which case we have \f$ A = V D V^{-1}
* \f$. This is called the eigendecomposition.
*
* The main function in this class is compute(), which computes the
 * eigenvalues and eigenvectors of a given matrix. The
* documentation for that function contains an example showing the
* main features of the class.
*
* \sa class EigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class ComplexEigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute().
*/
ComplexEigenSolver()
: m_eivec(),
m_eivalues(),
m_schur(),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa ComplexEigenSolver()
*/
explicit ComplexEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_schur(size),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX(size, size)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* This constructor calls compute() to compute the eigendecomposition.
*/
template<typename InputType>
explicit ComplexEigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)
: m_eivec(matrix.rows(),matrix.cols()),
m_eivalues(matrix.cols()),
m_schur(matrix.rows()),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX(matrix.rows(),matrix.cols())
{
compute(matrix.derived(), computeEigenvectors);
}
/** \brief Returns the eigenvectors of given matrix.
*
* \returns A const reference to the matrix whose columns are the eigenvectors.
*
* \pre Either the constructor
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
* function compute(const MatrixType& matrix, bool) has been called before
* to compute the eigendecomposition of a matrix, and
* \p computeEigenvectors was set to true (the default).
*
* This function returns a matrix whose columns are the eigenvectors. Column
* \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
* \f$ as returned by eigenvalues(). The eigenvectors are normalized to
* have (Euclidean) norm equal to one. The matrix returned by this
* function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
* V^{-1} \f$, if it exists.
*
* Example: \include ComplexEigenSolver_eigenvectors.cpp
* Output: \verbinclude ComplexEigenSolver_eigenvectors.out
*/
const EigenvectorType& eigenvectors() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre Either the constructor
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
* function compute(const MatrixType& matrix, bool) has been called before
* to compute the eigendecomposition of a matrix.
*
* This function returns a column vector containing the
* eigenvalues. Eigenvalues are repeated according to their
* algebraic multiplicity, so there are as many eigenvalues as
* rows in the matrix. The eigenvalues are not sorted in any particular
* order.
*
* Example: \include ComplexEigenSolver_eigenvalues.cpp
* Output: \verbinclude ComplexEigenSolver_eigenvalues.out
*/
const EigenvalueType& eigenvalues() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of the complex matrix \p matrix.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to Schur form using the
* ComplexSchur class. The Schur decomposition is then used to
* compute the eigenvalues and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
* is the size of the matrix.
*
* Example: \include ComplexEigenSolver_compute.cpp
* Output: \verbinclude ComplexEigenSolver_compute.out
*/
template<typename InputType>
ComplexEigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
return m_schur.info();
}
/** \brief Sets the maximum number of iterations allowed. */
ComplexEigenSolver& setMaxIterations(Index maxIters)
{
m_schur.setMaxIterations(maxIters);
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_schur.getMaxIterations();
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
ComplexSchur<MatrixType> m_schur;
bool m_isInitialized;
bool m_eigenvectorsOk;
EigenvectorType m_matX;
private:
void doComputeEigenvectors(RealScalar matrixnorm);
void sortEigenvalues(bool computeEigenvectors);
};
template<typename MatrixType>
template<typename InputType>
ComplexEigenSolver<MatrixType>&
ComplexEigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
{
check_template_parameters();
// this code is inspired from Jampack
eigen_assert(matrix.cols() == matrix.rows());
// Do a complex Schur decomposition, A = U T U^*
// The eigenvalues are on the diagonal of T.
m_schur.compute(matrix.derived(), computeEigenvectors);
if(m_schur.info() == Success)
{
m_eivalues = m_schur.matrixT().diagonal();
if(computeEigenvectors)
doComputeEigenvectors(m_schur.matrixT().norm());
sortEigenvalues(computeEigenvectors);
}
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
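// Usage sketch (added for illustration; guarded out so the header is
// unchanged for the compiler; all names below are hypothetical):
#if 0
#include <Eigen/Eigenvalues>
#include <iostream>
void complexEigenSolverSketch()
{
  Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
  Eigen::ComplexEigenSolver<Eigen::MatrixXcf> ces(A);
  if(ces.info() == Eigen::Success)
  {
    // With V = eigenvectors() and D = diag(eigenvalues()), A*V - V*D should
    // be small up to rounding errors.
    std::cout << (A * ces.eigenvectors()
                  - ces.eigenvectors() * ces.eigenvalues().asDiagonal()).norm()
              << std::endl;
  }
}
#endif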
template<typename MatrixType>
void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
{
const Index n = m_eivalues.size();
matrixnorm = numext::maxi(matrixnorm,(std::numeric_limits<RealScalar>::min)());
// Compute X such that T = X D X^(-1), where D is the diagonal of T.
// The matrix X is unit triangular.
m_matX = EigenvectorType::Zero(n, n);
for(Index k=n-1 ; k>=0 ; k--)
{
m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
// Compute X(i,k) using the (i,k) entry of the equation X T = D X
for(Index i=k-1 ; i>=0 ; i--)
{
m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
if(k-i-1>0)
m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
if(z==ComplexScalar(0))
{
        // If the i-th and k-th eigenvalues are equal, then z equals 0.
// Use a small value instead, to prevent division by zero.
numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
}
m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
}
}
// Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
m_eivec.noalias() = m_schur.matrixU() * m_matX;
// .. and normalize the eigenvectors
for(Index k=0 ; k<n ; k++)
{
m_eivec.col(k).normalize();
}
}
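// Derivation sketch (added): the back-substitution loop above solves
// T X = X D column by column. For column k and row i < k, upper triangularity
// of T gives
//   T(i,i) X(i,k) + sum_{j=i+1}^{k-1} T(i,j) X(j,k) + T(i,k) = T(k,k) X(i,k)
// (using X(k,k) = 1), hence
//   X(i,k) = -( T(i,k) + sum_{j=i+1}^{k-1} T(i,j) X(j,k) ) / ( T(i,i) - T(k,k) ),
// which is exactly the accumulation performed on m_matX.coeffRef(i,k).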
template<typename MatrixType>
void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
{
const Index n = m_eivalues.size();
for (Index i=0; i<n; i++)
{
Index k;
m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
if (k != 0)
{
k += i;
std::swap(m_eivalues[k],m_eivalues[i]);
if(computeEigenvectors)
m_eivec.col(i).swap(m_eivec.col(k));
}
}
}
} // end namespace Eigen
#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
| 12,558 | 35.193084 | 141 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/ComplexSchur.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPLEX_SCHUR_H
#define EIGEN_COMPLEX_SCHUR_H
#include "./HessenbergDecomposition.h"
namespace Eigen {
namespace internal {
template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class ComplexSchur
*
* \brief Performs a complex Schur decomposition of a real or complex square matrix
*
* \tparam _MatrixType the type of the matrix of which we are
* computing the Schur decomposition; this is expected to be an
* instantiation of the Matrix class template.
*
* Given a real or complex square matrix A, this class computes the
* Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
* complex matrix, and T is a complex upper triangular matrix. The
* diagonal of the matrix T corresponds to the eigenvalues of the
* matrix A.
*
* Call the function compute() to compute the Schur decomposition of
* a given matrix. Alternatively, you can use the
* ComplexSchur(const MatrixType&, bool) constructor which computes
* the Schur decomposition at construction time. Once the
* decomposition is computed, you can use the matrixU() and matrixT()
  * functions to retrieve the matrices U and T in the decomposition.
*
* \note This code is inspired from Jampack
*
* \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class ComplexSchur
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for \p _MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for the matrices in the Schur decomposition.
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of \p _MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
*
* The default constructor is useful in cases in which the user
* intends to perform decompositions via compute(). The \p size
* parameter is only used as a hint. It is not an error to give a
* wrong \p size, but it may impair performance.
*
* \sa compute() for an example.
*/
explicit ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size,size),
m_matU(size,size),
m_hess(size),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{}
/** \brief Constructor; computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
*
* This constructor calls compute() to compute the Schur decomposition.
*
* \sa matrixT() and matrixU() for examples.
*/
template<typename InputType>
explicit ComplexSchur(const EigenBase<InputType>& matrix, bool computeU = true)
: m_matT(matrix.rows(),matrix.cols()),
m_matU(matrix.rows(),matrix.cols()),
m_hess(matrix.rows()),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{
compute(matrix.derived(), computeU);
}
/** \brief Returns the unitary matrix in the Schur decomposition.
*
* \returns A const reference to the matrix U.
*
* It is assumed that either the constructor
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
* member function compute(const MatrixType& matrix, bool computeU)
* has been called before to compute the Schur decomposition of a
* matrix, and that \p computeU was set to true (the default
* value).
*
* Example: \include ComplexSchur_matrixU.cpp
* Output: \verbinclude ComplexSchur_matrixU.out
*/
const ComplexMatrixType& matrixU() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
return m_matU;
}
/** \brief Returns the triangular matrix in the Schur decomposition.
*
* \returns A const reference to the matrix T.
*
* It is assumed that either the constructor
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
* member function compute(const MatrixType& matrix, bool computeU)
* has been called before to compute the Schur decomposition of a
* matrix.
*
* Note that this function returns a plain square matrix. If you want to reference
* only the upper triangular part, use:
* \code schur.matrixT().triangularView<Upper>() \endcode
*
* Example: \include ComplexSchur_matrixT.cpp
* Output: \verbinclude ComplexSchur_matrixT.out
*/
const ComplexMatrixType& matrixT() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_matT;
}
/** \brief Computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
* \returns Reference to \c *this
*
* The Schur decomposition is computed by first reducing the
* matrix to Hessenberg form using the class
* HessenbergDecomposition. The Hessenberg matrix is then reduced
* to triangular form by performing QR iterations with a single
* shift. The cost of computing the Schur decomposition depends
      * on the number of iterations; as a rough guide, it may be taken
* to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
* if \a computeU is false.
*
* Example: \include ComplexSchur_compute.cpp
* Output: \verbinclude ComplexSchur_compute.out
*
* \sa compute(const MatrixType&, bool, Index)
*/
template<typename InputType>
ComplexSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
    /** \brief Computes the Schur decomposition from a given Hessenberg matrix.
     *  \param[in] matrixH Matrix in Hessenberg form H
     *  \param[in] matrixQ Unitary matrix Q such that A = Q H Q^*
     *  \param computeU Computes the matrix U of the Schur vectors
     *  \return Reference to \c *this
     *
     *  This routine assumes that the matrix is already reduced to the Hessenberg form matrixH
     *  using either the class HessenbergDecomposition or some other means.
     *  It computes the upper triangular matrix T of the Schur decomposition of H.
     *  When computeU is true, this routine computes the matrix U such that
     *  A = U T U^* = (QZ) T (QZ)^* = Q H Q^*, where A is the initial matrix and Z is
     *  the unitary transformation that reduces H to T.
     *
     *  NOTE: Q is referenced if computeU is true; so, if the initial unitary matrix
     *  is not available, the user should pass an identity matrix (Q.setIdentity()).
*
* \sa compute(const MatrixType&, bool)
*/
template<typename HessMatrixType, typename OrthMatrixType>
ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU=true);
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed.
*
* If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
* of the matrix.
*/
ComplexSchur& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_maxIters;
}
/** \brief Maximum number of iterations per row.
*
* If not otherwise specified, the maximum number of iterations is this number times the size of the
* matrix. It is currently set to 30.
*/
static const int m_maxIterationsPerRow = 30;
protected:
ComplexMatrixType m_matT, m_matU;
HessenbergDecomposition<MatrixType> m_hess;
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
Index m_maxIters;
private:
bool subdiagonalEntryIsNeglegible(Index i);
ComplexScalar computeShift(Index iu, Index iter);
void reduceToTriangularForm(bool computeU);
friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
};
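// Usage sketch (added for illustration; guarded out so the header is
// unchanged for the compiler; all names below are hypothetical):
#if 0
#include <Eigen/Eigenvalues>
void complexSchurSketch()
{
  Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
  Eigen::ComplexSchur<Eigen::MatrixXcf> schur(A);
  if(schur.info() == Eigen::Success)
  {
    // A = U T U^*, with U unitary and T upper triangular; the residual
    // below should be small up to rounding errors.
    Eigen::MatrixXcf residual = A - schur.matrixU() * schur.matrixT()
                                    * schur.matrixU().adjoint();
  }
}
#endif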
/** If m_matT(i+1,i) is negligible in floating point arithmetic
  * compared to m_matT(i,i) and m_matT(i+1,i+1), then set it to zero and
  * return true, else return false. */
template<typename MatrixType>
inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{
RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
{
m_matT.coeffRef(i+1,i) = ComplexScalar(0);
return true;
}
return false;
}
/** Compute the shift in the current QR iteration. */
template<typename MatrixType>
typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
{
using std::abs;
if (iter == 10 || iter == 20)
{
// exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));
}
// compute the shift as one of the eigenvalues of t, the 2x2
// diagonal block on the bottom of the active submatrix
Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
RealScalar normt = t.cwiseAbs().sum();
  t /= normt;     // the normalization by normt is to avoid under/overflow
ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);
ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);
if(numext::norm1(eival1) > numext::norm1(eival2))
eival2 = det / eival1;
else
eival1 = det / eival2;
// choose the eigenvalue closest to the bottom entry of the diagonal
if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))
return normt * eival1;
else
return normt * eival2;
}
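// Note (added): this is essentially a Wilkinson-style shift. Among the two
// eigenvalues of the trailing 2x2 block, the one closest to the bottom-right
// entry t(1,1) is selected, the standard heuristic for obtaining fast
// convergence of the shifted QR iteration.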
template<typename MatrixType>
template<typename InputType>
ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)
{
m_matUisUptodate = false;
eigen_assert(matrix.cols() == matrix.rows());
if(matrix.cols() == 1)
{
m_matT = matrix.derived().template cast<ComplexScalar>();
if(computeU) m_matU = ComplexMatrixType::Identity(1,1);
m_info = Success;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix.derived(), computeU);
computeFromHessenberg(m_matT, m_matU, computeU);
return *this;
}
template<typename MatrixType>
template<typename HessMatrixType, typename OrthMatrixType>
ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
{
m_matT = matrixH;
if(computeU)
m_matU = matrixQ;
reduceToTriangularForm(computeU);
return *this;
}
namespace internal {
/* Reduce given matrix to Hessenberg form */
template<typename MatrixType, bool IsComplex>
struct complex_schur_reduce_to_hessenberg
{
// this is the implementation for the case IsComplex = true
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
{
_this.m_hess.compute(matrix);
_this.m_matT = _this.m_hess.matrixH();
if(computeU) _this.m_matU = _this.m_hess.matrixQ();
}
};
template<typename MatrixType>
struct complex_schur_reduce_to_hessenberg<MatrixType, false>
{
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
{
typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
    // Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
_this.m_hess.compute(matrix);
_this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
if(computeU)
{
// This may cause an allocation which seems to be avoidable
MatrixType Q = _this.m_hess.matrixQ();
_this.m_matU = Q.template cast<ComplexScalar>();
}
}
};
} // end namespace internal
// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
template<typename MatrixType>
void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
{
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * m_matT.rows();
// The matrix m_matT is divided in three parts.
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
  // Rows il,...,iu are the part we are working on (the active submatrix).
// Rows iu+1,...,end are already brought in triangular form.
Index iu = m_matT.cols() - 1;
Index il;
Index iter = 0; // number of iterations we are working on the (iu,iu) element
Index totalIter = 0; // number of iterations for whole matrix
while(true)
{
// find iu, the bottom row of the active submatrix
while(iu > 0)
{
if(!subdiagonalEntryIsNeglegible(iu-1)) break;
iter = 0;
--iu;
}
// if iu is zero then we are done; the whole matrix is triangularized
if(iu==0) break;
// if we spent too many iterations, we give up
iter++;
totalIter++;
if(totalIter > maxIters) break;
// find il, the top row of the active submatrix
il = iu-1;
while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
{
--il;
}
/* perform the QR step using Givens rotations. The first rotation
creates a bulge; the (il+2,il) element becomes nonzero. This
bulge is chased down to the bottom of the active submatrix. */
ComplexScalar shift = computeShift(iu, iter);
JacobiRotation<ComplexScalar> rot;
rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
for(Index i=il+1 ; i<iu ; i++)
{
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
}
}
if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
m_isInitialized = true;
m_matUisUptodate = computeU;
}
} // end namespace Eigen
#endif // EIGEN_COMPLEX_SCHUR_H
| 17,021 | 36.004348 | 150 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Complex Schur needed to complex unsymmetrical eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_COMPLEX_SCHUR_LAPACKE_H
#define EIGEN_COMPLEX_SCHUR_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SCHUR_COMPLEX(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \
template<> template<typename InputType> inline \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
typedef MatrixType::RealScalar RealScalar; \
typedef std::complex<RealScalar> ComplexScalar; \
\
eigen_assert(matrix.cols() == matrix.rows()); \
\
m_matUisUptodate = false; \
if(matrix.cols() == 1) \
{ \
m_matT = matrix.derived().template cast<ComplexScalar>(); \
if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \
m_info = Success; \
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
} \
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobvs, sort='N'; \
LAPACK_##LAPACKE_PREFIX_U##_SELECT1 select = 0; \
jobvs = (computeU) ? 'V' : 'N'; \
m_matU.resize(n, n); \
lapack_int ldvs = internal::convert_index<lapack_int>(m_matU.outerStride()); \
m_matT = matrix; \
lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \
Matrix<EIGTYPE, Dynamic, Dynamic> w; \
w.resize(n, 1);\
info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)w.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \
if(info == 0) \
m_info = Success; \
else \
m_info = NoConvergence; \
\
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
\
}
EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, RowMajor, LAPACK_ROW_MAJOR)
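// Note (added): each macro expansion above replaces ComplexSchur::compute()
// for dynamic-size column-major or row-major matrices of scomplex/dcomplex,
// routing the decomposition through LAPACKE_cgees / LAPACKE_zgees while
// keeping the 1x1 trivial case and the Success/NoConvergence reporting
// semantics of the generic implementation.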
} // end namespace Eigen
#endif // EIGEN_COMPLEX_SCHUR_LAPACKE_H
| 4,178 | 44.423913 | 185 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/EigenSolver.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EIGENSOLVER_H
#define EIGEN_EIGENSOLVER_H
#include "./RealSchur.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class EigenSolver
*
* \brief Computes eigenvalues and eigenvectors of general matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If
* \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
* \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
* V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
* have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
*
* The eigenvalues and eigenvectors of a matrix may be complex, even when the
* matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
* \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
* matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
* have blocks of the form
* \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
* (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These
* blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
* this variant of the eigendecomposition the pseudo-eigendecomposition.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* EigenSolver(const MatrixType&, bool) constructor which computes the
* eigenvalues and eigenvectors at construction time. Once the eigenvalue and
* eigenvectors are computed, they can be retrieved with the eigenvalues() and
* eigenvectors() functions. The pseudoEigenvalueMatrix() and
* pseudoEigenvectors() methods allow the construction of the
* pseudo-eigendecomposition.
*
* The documentation for EigenSolver(const MatrixType&, bool) contains an
* example of the typical use of this class.
*
* \note The implementation is adapted from
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
* Their code is based on EISPACK.
*
* \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class EigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via EigenSolver::compute(const MatrixType&, bool).
*
* \sa compute() for an example.
*/
    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
/** \brief Default constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa EigenSolver()
*/
explicit EigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_realSchur(size),
m_matT(size, size),
m_tmp(size)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* This constructor calls compute() to compute the eigenvalues
* and eigenvectors.
*
* Example: \include EigenSolver_EigenSolver_MatrixType.cpp
* Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
*
* \sa compute()
*/
template<typename InputType>
explicit EigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_realSchur(matrix.cols()),
m_matT(matrix.rows(), matrix.cols()),
m_tmp(matrix.cols())
{
compute(matrix.derived(), computeEigenvectors);
}
/** \brief Returns the eigenvectors of given matrix.
*
* \returns %Matrix whose columns are the (possibly complex) eigenvectors.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. The
* matrix returned by this function is the matrix \f$ V \f$ in the
* eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
*
* Example: \include EigenSolver_eigenvectors.cpp
* Output: \verbinclude EigenSolver_eigenvectors.out
*
* \sa eigenvalues(), pseudoEigenvectors()
*/
EigenvectorsType eigenvectors() const;
/** \brief Returns the pseudo-eigenvectors of given matrix.
*
* \returns Const reference to matrix whose columns are the pseudo-eigenvectors.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* The real matrix \f$ V \f$ returned by this function and the
* block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
* satisfy \f$ AV = VD \f$.
*
* Example: \include EigenSolver_pseudoEigenvectors.cpp
* Output: \verbinclude EigenSolver_pseudoEigenvectors.out
*
* \sa pseudoEigenvalueMatrix(), eigenvectors()
*/
const MatrixType& pseudoEigenvectors() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
*
* \returns A block-diagonal matrix.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before.
*
* The matrix \f$ D \f$ returned by this function is real and
* block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
* blocks of the form
* \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
* These blocks are not sorted in any particular order.
* The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
* pseudoEigenvectors() satisfy \f$ AV = VD \f$.
*
* \sa pseudoEigenvectors() for an example, eigenvalues()
*/
MatrixType pseudoEigenvalueMatrix() const;
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are not sorted in any particular order.
*
* Example: \include EigenSolver_eigenvalues.cpp
* Output: \verbinclude EigenSolver_eigenvalues.out
*
* \sa eigenvectors(), pseudoEigenvalueMatrix(),
* MatrixBase::eigenvalues()
*/
const EigenvalueType& eigenvalues() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of the real matrix \p matrix.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to real Schur form using the RealSchur
* class. The Schur decomposition is then used to compute the eigenvalues
* and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* Schur decomposition, which is very approximately \f$ 25n^3 \f$
* (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors
* is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
*
      * This method reuses the allocated data in the EigenSolver object.
*
* Example: \include EigenSolver_compute.cpp
* Output: \verbinclude EigenSolver_compute.out
*/
template<typename InputType>
EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
    /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed. */
EigenSolver& setMaxIterations(Index maxIters)
{
m_realSchur.setMaxIterations(maxIters);
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_realSchur.getMaxIterations();
}
private:
void doComputeEigenvectors();
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;
bool m_eigenvectorsOk;
ComputationInfo m_info;
RealSchur<MatrixType> m_realSchur;
MatrixType m_matT;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
ColumnVectorType m_tmp;
};
template<typename MatrixType>
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
Index n = m_eivalues.rows();
MatrixType matD = MatrixType::Zero(n,n);
for (Index i=0; i<n; ++i)
{
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)), precision))
matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
else
{
matD.template block<2,2>(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
-numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
++i;
}
}
return matD;
}
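// Note (added): together with pseudoEigenvectors(), the matrix D built above
// yields the fully real factorization A * V = V * D, where each complex
// conjugate eigenvalue pair u +/- i*v contributes one 2x2 diagonal block
// [u v; -v u] and each real eigenvalue a 1x1 block.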
template<typename MatrixType>
typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
Index n = m_eivec.cols();
EigenvectorsType matV(n,n);
for (Index j=0; j<n; ++j)
{
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j)), precision) || j+1==n)
{
// we have a real eigen value
matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
matV.col(j).normalize();
}
else
{
// we have a pair of complex eigen values
for (Index i=0; i<n; ++i)
{
matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
}
matV.col(j).normalize();
matV.col(j+1).normalize();
++j;
}
}
return matV;
}
template<typename MatrixType>
template<typename InputType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
using numext::isfinite;
eigen_assert(matrix.cols() == matrix.rows());
// Reduce to real Schur form.
m_realSchur.compute(matrix.derived(), computeEigenvectors);
m_info = m_realSchur.info();
if (m_info == Success)
{
m_matT = m_realSchur.matrixT();
if (computeEigenvectors)
m_eivec = m_realSchur.matrixU();
// Compute eigenvalues from matT
m_eivalues.resize(matrix.cols());
Index i = 0;
while (i < matrix.cols())
{
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
{
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
if(!(isfinite)(m_eivalues.coeffRef(i)))
{
m_isInitialized = true;
m_eigenvectorsOk = false;
m_info = NumericalIssue;
return *this;
}
++i;
}
else
{
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
Scalar z;
// Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
// without overflow
{
Scalar t0 = m_matT.coeff(i+1, i);
Scalar t1 = m_matT.coeff(i, i+1);
Scalar maxval = numext::maxi<Scalar>(abs(p),numext::maxi<Scalar>(abs(t0),abs(t1)));
t0 /= maxval;
t1 /= maxval;
Scalar p0 = p/maxval;
z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
}
m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
if(!((isfinite)(m_eivalues.coeffRef(i)) && (isfinite)(m_eivalues.coeffRef(i+1))))
{
m_isInitialized = true;
m_eigenvectorsOk = false;
m_info = NumericalIssue;
return *this;
}
i += 2;
}
}
// Compute eigenvectors.
if (computeEigenvectors)
doComputeEigenvectors();
}
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
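// Usage sketch (added for illustration; guarded out so the header is
// unchanged for the compiler; all names below are hypothetical):
#if 0
#include <Eigen/Eigenvalues>
void eigenSolverSketch()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
  Eigen::EigenSolver<Eigen::MatrixXd> es(A);
  if(es.info() == Eigen::Success)
  {
    // Eigenvalues of a real matrix may be complex, so V and D are complex;
    // A*V - V*D should be small up to rounding errors.
    Eigen::MatrixXcd V = es.eigenvectors();
    Eigen::VectorXcd lambda = es.eigenvalues();
    double residual = (A.cast<std::complex<double> >() * V
                       - V * lambda.asDiagonal()).norm();
    (void)residual;
  }
}
#endif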
template<typename MatrixType>
void EigenSolver<MatrixType>::doComputeEigenvectors()
{
using std::abs;
const Index size = m_eivec.cols();
const Scalar eps = NumTraits<Scalar>::epsilon();
// inefficient! this is already computed in RealSchur
Scalar norm(0);
for (Index j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
}
// Backsubstitute to find vectors of upper triangular form
if (norm == Scalar(0))
{
return;
}
for (Index n = size-1; n >= 0; n--)
{
Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag();
// Scalar vector
if (q == Scalar(0))
{
Scalar lastr(0), lastw(0);
Index l = n;
m_matT.coeffRef(n,n) = Scalar(1);
for (Index i = n-1; i >= 0; i--)
{
Scalar w = m_matT.coeff(i,i) - p;
Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastr = r;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
if (w != Scalar(0))
m_matT.coeffRef(i,n) = -r / w;
else
m_matT.coeffRef(i,n) = -r / (eps * norm);
}
else // Solve real equations
{
Scalar x = m_matT.coeff(i,i+1);
Scalar y = m_matT.coeff(i+1,i);
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
Scalar t = (x * lastr - lastw * r) / denom;
m_matT.coeffRef(i,n) = t;
if (abs(x) > abs(lastw))
m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
else
m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
}
// Overflow control
Scalar t = abs(m_matT.coeff(i,n));
if ((eps * t) * t > Scalar(1))
m_matT.col(n).tail(size-i) /= t;
}
}
}
else if (q < Scalar(0) && n > 0) // Complex vector
{
Scalar lastra(0), lastsa(0), lastw(0);
Index l = n-1;
// Last vector component imaginary so matrix is triangular
if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))
{
m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
}
else
{
ComplexScalar cc = ComplexScalar(Scalar(0),-m_matT.coeff(n-1,n)) / ComplexScalar(m_matT.coeff(n-1,n-1)-p,q);
m_matT.coeffRef(n-1,n-1) = numext::real(cc);
m_matT.coeffRef(n-1,n) = numext::imag(cc);
}
m_matT.coeffRef(n,n-1) = Scalar(0);
m_matT.coeffRef(n,n) = Scalar(1);
for (Index i = n-2; i >= 0; i--)
{
Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
Scalar w = m_matT.coeff(i,i) - p;
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastra = ra;
lastsa = sa;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == RealScalar(0))
{
ComplexScalar cc = ComplexScalar(-ra,-sa) / ComplexScalar(w,q);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
}
else
{
// Solve complex equations
Scalar x = m_matT.coeff(i,i+1);
Scalar y = m_matT.coeff(i+1,i);
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
if ((vr == Scalar(0)) && (vi == Scalar(0)))
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
ComplexScalar cc = ComplexScalar(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra) / ComplexScalar(vr,vi);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
}
else
{
cc = ComplexScalar(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n)) / ComplexScalar(lastw,q);
m_matT.coeffRef(i+1,n-1) = numext::real(cc);
m_matT.coeffRef(i+1,n) = numext::imag(cc);
}
}
// Overflow control
Scalar t = numext::maxi<Scalar>(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n-1, size-i, 2) /= t;
}
}
      // We handled a pair of complex conjugate eigenvalues, so we need to skip them both
n--;
}
else
{
eigen_assert(0 && "Internal bug in EigenSolver (INF or NaN has not been detected)"); // this should not happen
}
}
// Back transformation to get eigenvectors of original matrix
for (Index j = size-1; j >= 0; j--)
{
m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
m_eivec.col(j) = m_tmp;
}
}
} // end namespace Eigen
#endif // EIGEN_EIGENSOLVER_H
| 22,944 | 35.829856 | 158 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012-2016 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
// Copyright (C) 2016 Tobias Wood <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
#define EIGEN_GENERALIZEDEIGENSOLVER_H
#include "./RealQZ.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class GeneralizedEigenSolver
*
* \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
*
* \tparam _MatrixType the type of the matrices of which we are computing the
* eigen-decomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$. If
* \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
* \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
* B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
* have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
*
* The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
* matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is
* singular. To workaround this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
* and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
  * then one can consider the well-defined left eigenvalue \f$ \mu_i = \beta_i / \alpha_i \f$ such that:
* \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is
* called the left eigenvector.
*
* Call the function compute() to compute the generalized eigenvalues and eigenvectors of
* a given matrix pair. Alternatively, you can use the
* GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
* eigenvalues and eigenvectors at construction time. Once the eigenvalue and
* eigenvectors are computed, they can be retrieved with the eigenvalues() and
* eigenvectors() functions.
*
  * Here is a usage example of this class:
* Example: \include GeneralizedEigenSolver.cpp
* Output: \verbinclude GeneralizedEigenSolver.out
*
* \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class GeneralizedEigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
    /** \brief Type for the vector of real scalar values returned by betas().
*
* This is a column vector with entries of type #Scalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
    /** \brief Type for the vector of complex scalar values returned by alphas().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
/** \brief Expression type for the eigenvalues as returned by eigenvalues().
*/
typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via EigenSolver::compute(const MatrixType&, bool).
*
* \sa compute() for an example.
*/
GeneralizedEigenSolver()
: m_eivec(),
m_alphas(),
m_betas(),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ()
{}
/** \brief Default constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa GeneralizedEigenSolver()
*/
explicit GeneralizedEigenSolver(Index size)
: m_eivec(size, size),
m_alphas(size),
m_betas(size),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ(size),
m_tmp(size)
{}
/** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
*
* \param[in] A Square matrix whose eigendecomposition is to be computed.
* \param[in] B Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are computed.
*
* This constructor calls compute() to compute the generalized eigenvalues
* and eigenvectors.
*
* \sa compute()
*/
GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
: m_eivec(A.rows(), A.cols()),
m_alphas(A.cols()),
m_betas(A.cols()),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ(A.cols()),
m_tmp(A.cols())
{
compute(A, B, computeEigenvectors);
}
    /** \brief Returns the computed generalized eigenvectors.
*
* \returns %Matrix whose columns are the (possibly complex) right eigenvectors.
* i.e. the eigenvectors that solve (A - l*B)x = 0. The ordering matches the eigenvalues.
*
* \pre Either the constructor
* GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
* compute(const MatrixType&, const MatrixType& bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* \sa eigenvalues()
*/
EigenvectorsType eigenvectors() const {
eigen_assert(m_vectorsOkay && "Eigenvectors for GeneralizedEigenSolver were not calculated.");
return m_eivec;
}
/** \brief Returns an expression of the computed generalized eigenvalues.
*
* \returns An expression of the column vector containing the eigenvalues.
*
* It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
      * Note that betas might contain zeros. It is therefore not recommended to use this function,
* but rather directly deal with the alphas and betas vectors.
*
* \pre Either the constructor
* GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
* compute(const MatrixType&,const MatrixType&,bool) has been called before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are not sorted in any particular order.
*
* \sa alphas(), betas(), eigenvectors()
*/
EigenvalueType eigenvalues() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return EigenvalueType(m_alphas,m_betas);
}
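    // Editor's sketch (not part of the original sources): as warned above, prefer
    // alphas()/betas() with an explicit guard on beta; the matrices A, B and the
    // 1e-12 threshold are assumptions of this example.
    //
    //   Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(A, B);
    //   for (Eigen::Index k = 0; k < ges.betas().size(); ++k)
    //     if (std::abs(ges.betas()(k)) > 1e-12)          // finite eigenvalue
    //       std::cout << ges.alphas()(k) / ges.betas()(k) << '\n';
    //     else
    //       std::cout << "eigenvalue " << k << " is (nearly) infinite\n";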
/** \returns A const reference to the vectors containing the alpha values
*
      * This vector allows one to reconstruct the j-th eigenvalue as alphas(j)/betas(j).
*
* \sa betas(), eigenvalues() */
ComplexVectorType alphas() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return m_alphas;
}
/** \returns A const reference to the vectors containing the beta values
*
      * This vector allows one to reconstruct the j-th eigenvalue as alphas(j)/betas(j).
*
* \sa alphas(), eigenvalues() */
VectorType betas() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return m_betas;
}
/** \brief Computes generalized eigendecomposition of given matrix.
*
* \param[in] A Square matrix whose eigendecomposition is to be computed.
* \param[in] B Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
      * This function computes the generalized eigenvalues of the real matrix pair \p A and \p B.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to real generalized Schur form using the RealQZ
* class. The generalized Schur decomposition is then used to compute the eigenvalues
* and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* generalized Schur decomposition.
*
      * This method reuses the allocated data in the GeneralizedEigenSolver object.
*/
GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
ComputationInfo info() const
{
eigen_assert(m_valuesOkay && "EigenSolver is not initialized.");
return m_realQZ.info();
}
/** Sets the maximal number of iterations allowed.
*/
GeneralizedEigenSolver& setMaxIterations(Index maxIters)
{
m_realQZ.setMaxIterations(maxIters);
return *this;
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
EigenvectorsType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;
bool m_valuesOkay, m_vectorsOkay;
RealQZ<MatrixType> m_realQZ;
ComplexVectorType m_tmp;
};
template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
Index size = A.cols();
m_valuesOkay = false;
m_vectorsOkay = false;
// Reduce to generalized real Schur form:
// A = Q S Z and B = Q T Z
m_realQZ.compute(A, B, computeEigenvectors);
if (m_realQZ.info() == Success)
{
// Resize storage
m_alphas.resize(size);
m_betas.resize(size);
if (computeEigenvectors)
{
m_eivec.resize(size,size);
m_tmp.resize(size);
}
// Aliases:
Map<VectorType> v(reinterpret_cast<Scalar*>(m_tmp.data()), size);
ComplexVectorType &cv = m_tmp;
const MatrixType &mZ = m_realQZ.matrixZ();
const MatrixType &mS = m_realQZ.matrixS();
const MatrixType &mT = m_realQZ.matrixT();
Index i = 0;
while (i < size)
{
if (i == size - 1 || mS.coeff(i+1, i) == Scalar(0))
{
// Real eigenvalue
m_alphas.coeffRef(i) = mS.diagonal().coeff(i);
m_betas.coeffRef(i) = mT.diagonal().coeff(i);
if (computeEigenvectors)
{
v.setConstant(Scalar(0.0));
v.coeffRef(i) = Scalar(1.0);
// For singular eigenvalues do nothing more
if(abs(m_betas.coeffRef(i)) >= (std::numeric_limits<RealScalar>::min)())
{
// Non-singular eigenvalue
const Scalar alpha = real(m_alphas.coeffRef(i));
const Scalar beta = m_betas.coeffRef(i);
for (Index j = i-1; j >= 0; j--)
{
const Index st = j+1;
const Index sz = i-j;
if (j > 0 && mS.coeff(j, j-1) != Scalar(0))
{
// 2x2 block
Matrix<Scalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( v.segment(st,sz) );
Matrix<Scalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);
v.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);
j--;
}
else
{
v.coeffRef(j) = -v.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum() / (beta*mS.coeffRef(j,j) - alpha*mT.coeffRef(j,j));
}
}
}
m_eivec.col(i).real().noalias() = mZ.transpose() * v;
m_eivec.col(i).real().normalize();
m_eivec.col(i).imag().setConstant(0);
}
++i;
}
else
{
        // We need to extract the generalized eigenvalues of the pair of a general 2x2 block S and a positive diagonal 2x2 block T
        // Then, taking beta=T_00*T_11, we can avoid any division, and the alphas are the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00):
// T = [a 0]
// [0 b]
RealScalar a = mT.diagonal().coeff(i),
b = mT.diagonal().coeff(i+1);
const RealScalar beta = m_betas.coeffRef(i) = m_betas.coeffRef(i+1) = a*b;
        // ^^ NOTE: using diagonal()(i) instead of coeff(i,i) works around an MSVC bug.
Matrix<RealScalar,2,2> S2 = mS.template block<2,2>(i,i) * Matrix<Scalar,2,1>(b,a).asDiagonal();
Scalar p = Scalar(0.5) * (S2.coeff(0,0) - S2.coeff(1,1));
Scalar z = sqrt(abs(p * p + S2.coeff(1,0) * S2.coeff(0,1)));
const ComplexScalar alpha = ComplexScalar(S2.coeff(1,1) + p, (beta > 0) ? z : -z);
m_alphas.coeffRef(i) = conj(alpha);
m_alphas.coeffRef(i+1) = alpha;
if (computeEigenvectors) {
// Compute eigenvector in position (i+1) and then position (i) is just the conjugate
cv.setZero();
cv.coeffRef(i+1) = Scalar(1.0);
          // here, the "static_cast" works around expression template issues.
cv.coeffRef(i) = -(static_cast<Scalar>(beta*mS.coeffRef(i,i+1)) - alpha*mT.coeffRef(i,i+1))
/ (static_cast<Scalar>(beta*mS.coeffRef(i,i)) - alpha*mT.coeffRef(i,i));
for (Index j = i-1; j >= 0; j--)
{
const Index st = j+1;
const Index sz = i+1-j;
if (j > 0 && mS.coeff(j, j-1) != Scalar(0))
{
// 2x2 block
Matrix<ComplexScalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( cv.segment(st,sz) );
Matrix<ComplexScalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);
cv.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);
j--;
} else {
cv.coeffRef(j) = cv.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum()
/ (alpha*mT.coeffRef(j,j) - static_cast<Scalar>(beta*mS.coeffRef(j,j)));
}
}
m_eivec.col(i+1).noalias() = (mZ.transpose() * cv);
m_eivec.col(i+1).normalize();
m_eivec.col(i) = m_eivec.col(i+1).conjugate();
}
i += 2;
}
}
m_valuesOkay = true;
m_vectorsOkay = computeEigenvectors;
}
return *this;
}
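// Editor's note (hedged sketch): both branches above (1x1 real blocks and 2x2
// complex-conjugate blocks) can be checked through the defining relation
// beta_k * A * v_k = alpha_k * B * v_k, which avoids dividing by beta; the
// matrices A, B and the tolerance are assumptions of this example.
//
//   Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(A, B);
//   Eigen::MatrixXcd V  = ges.eigenvectors();
//   Eigen::MatrixXcd Ac = A.cast<std::complex<double> >();
//   Eigen::MatrixXcd Bc = B.cast<std::complex<double> >();
//   for (Eigen::Index k = 0; k < V.cols(); ++k)
//     assert((ges.betas()(k) * (Ac * V.col(k))
//             - ges.alphas()(k) * (Bc * V.col(k))).norm()
//            < 1e-8 * (Ac.norm() + Bc.norm()));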
} // end namespace Eigen
#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
| 17,191 | 39.933333 | 186 | h |
| abess | abess-master/python/include/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
#define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
#include "./Tridiagonalization.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class GeneralizedSelfAdjointEigenSolver
*
* \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template.
*
* This class solves the generalized eigenvalue problem
* \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be
* selfadjoint and the matrix \f$ B \f$ should be positive definite.
*
* Only the \b lower \b triangular \b part of the input matrix is referenced.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
* constructor which computes the eigenvalues and eigenvectors at construction time.
* Once the eigenvalue and eigenvectors are computed, they can be retrieved with the eigenvalues()
* and eigenvectors() functions.
*
* The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
* contains an example of the typical use of this class.
*
* \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType>
class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType>
{
typedef SelfAdjointEigenSolver<_MatrixType> Base;
public:
typedef _MatrixType MatrixType;
/** \brief Default constructor for fixed-size matrices.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use
* GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices.
*/
GeneralizedSelfAdjointEigenSolver() : Base() {}
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
*
* \param [in] size Positive integer, size of the matrix whose
* eigenvalues and eigenvectors will be computed.
*
* This constructor is useful for dynamic-size matrices, when the user
* intends to perform decompositions via compute(). The \p size
* parameter is only used as a hint. It is not an error to give a wrong
* \p size, but it may impair performance.
*
* \sa compute() for an example
*/
explicit GeneralizedSelfAdjointEigenSolver(Index size)
: Base(size)
{}
/** \brief Constructor; computes generalized eigendecomposition of given matrix pencil.
*
* \param[in] matA Selfadjoint matrix in matrix pencil.
* Only the lower triangular part of the matrix is referenced.
* \param[in] matB Positive-definite matrix in matrix pencil.
* Only the lower triangular part of the matrix is referenced.
* \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
* Default is #ComputeEigenvectors|#Ax_lBx.
*
* This constructor calls compute(const MatrixType&, const MatrixType&, int)
* to compute the eigenvalues and (if requested) the eigenvectors of the
* generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
* selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
* \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property
* \f$ x^* B x = 1 \f$. The eigenvectors are computed if
* \a options contains ComputeEigenvectors.
*
* In addition, the two following variants can be solved via \p options:
* - \c ABx_lx: \f$ ABx = \lambda x \f$
* - \c BAx_lx: \f$ BAx = \lambda x \f$
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
*
* \sa compute(const MatrixType&, const MatrixType&, int)
*/
GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB,
int options = ComputeEigenvectors|Ax_lBx)
: Base(matA.cols())
{
compute(matA, matB, options);
}
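    // Editor's sketch (assumptions of this example: dense double matrices, with B
    // made positive definite by construction): each computed eigenvector is
    // B-normalized, i.e. x^T B x = 1, as stated above.
    //
    //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);  A = (A + A.transpose()).eval();
    //   Eigen::MatrixXd B = Eigen::MatrixXd::Random(4,4);
    //   B = (B * B.transpose() + 4.0 * Eigen::MatrixXd::Identity(4,4)).eval();
    //   Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> ges(A, B);
    //   Eigen::VectorXd x = ges.eigenvectors().col(0);
    //   assert(std::abs(x.dot(B * x) - 1.0) < 1e-9);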
/** \brief Computes generalized eigendecomposition of given matrix pencil.
*
* \param[in] matA Selfadjoint matrix in matrix pencil.
* Only the lower triangular part of the matrix is referenced.
* \param[in] matB Positive-definite matrix in matrix pencil.
* Only the lower triangular part of the matrix is referenced.
* \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
* Default is #ComputeEigenvectors|#Ax_lBx.
*
* \returns Reference to \c *this
*
      * According to \p options, this function computes eigenvalues and (if requested)
* the eigenvectors of one of the following three generalized eigenproblems:
* - \c Ax_lBx: \f$ Ax = \lambda B x \f$
* - \c ABx_lx: \f$ ABx = \lambda x \f$
* - \c BAx_lx: \f$ BAx = \lambda x \f$
* with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
* matrix \f$ B \f$.
* In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$.
*
* The eigenvalues() function can be used to retrieve
* the eigenvalues. If \p options contains ComputeEigenvectors, then the
* eigenvectors are also computed and can be retrieved by calling
* eigenvectors().
*
* The implementation uses LLT to compute the Cholesky decomposition
* \f$ B = LL^* \f$ and computes the classical eigendecomposition
* of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx
* and of \f$ L^{*} A L \f$ otherwise. This solves the
* generalized eigenproblem, because any solution of the generalized
* eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
* \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
* eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements
* can be made for the two other variants.
*
* Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp
* Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out
*
* \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
*/
GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB,
int options = ComputeEigenvectors|Ax_lBx);
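    // Editor's note (hedged sketch): the LLT-based reduction documented above can
    // be reproduced by hand for the Ax_lBx case; the local names L and C are
    // assumptions of this example.
    //
    //   Eigen::LLT<Eigen::MatrixXd> llt(B);                 // B = L * L^T
    //   Eigen::MatrixXd L = llt.matrixL();
    //   Eigen::MatrixXd C = L.triangularView<Eigen::Lower>().solve(A);          // L^{-1} A
    //   C = L.triangularView<Eigen::Lower>().solve(C.transpose()).transpose();  // L^{-1} A L^{-T}
    //   // The (selfadjoint) eigenvalues of C equal the generalized eigenvalues of (A, B).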
protected:
};
template<typename MatrixType>
GeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>::
compute(const MatrixType& matA, const MatrixType& matB, int options)
{
eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx
|| (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx)
&& "invalid option parameter");
bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors);
  // Compute the Cholesky decomposition of matB = L L' = U'U
LLT<MatrixType> cholB(matB);
int type = (options&GenEigMask);
if(type==0)
type = Ax_lBx;
if(type==Ax_lBx)
{
// compute C = inv(L) A inv(L')
MatrixType matC = matA.template selfadjointView<Lower>();
cholB.matrixL().template solveInPlace<OnTheLeft>(matC);
cholB.matrixU().template solveInPlace<OnTheRight>(matC);
Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly );
    // transform back the eigenvectors: evecs = inv(U) * evecs
if(computeEigVecs)
cholB.matrixU().solveInPlace(Base::m_eivec);
}
else if(type==ABx_lx)
{
// compute C = L' A L
MatrixType matC = matA.template selfadjointView<Lower>();
matC = matC * cholB.matrixL();
matC = cholB.matrixU() * matC;
Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
    // transform back the eigenvectors: evecs = inv(U) * evecs
if(computeEigVecs)
cholB.matrixU().solveInPlace(Base::m_eivec);
}
else if(type==BAx_lx)
{
// compute C = L' A L
MatrixType matC = matA.template selfadjointView<Lower>();
matC = matC * cholB.matrixL();
matC = cholB.matrixU() * matC;
Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
    // transform back the eigenvectors: evecs = L * evecs
if(computeEigVecs)
Base::m_eivec = cholB.matrixL() * Base::m_eivec;
}
return *this;
}
} // end namespace Eigen
#endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
| 9,715 | 41.801762 | 117 | h |
| abess | abess-master/python/include/Eigen/src/Eigenvalues/HessenbergDecomposition.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
#define EIGEN_HESSENBERGDECOMPOSITION_H
namespace Eigen {
namespace internal {
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType;
template<typename MatrixType>
struct traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
typedef MatrixType ReturnType;
};
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class HessenbergDecomposition
*
* \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation
*
* \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
*
* This class performs an Hessenberg decomposition of a matrix \f$ A \f$. In
* the real case, the Hessenberg decomposition consists of an orthogonal
* matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H
* Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its
* transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the
* subdiagonal, so it is almost upper triangular. The Hessenberg decomposition
* of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is,
* \f$ Q^{-1} = Q^* \f$).
*
* Call the function compute() to compute the Hessenberg decomposition of a
* given matrix. Alternatively, you can use the
* HessenbergDecomposition(const MatrixType&) constructor which computes the
* Hessenberg decomposition at construction time. Once the decomposition is
* computed, you can use the matrixH() and matrixQ() functions to construct
* the matrices H and Q in the decomposition.
*
* The documentation for matrixH() contains an example of the typical use of
* this class.
*
* \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module"
*/
template<typename _MatrixType> class HessenbergDecomposition
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
Size = MatrixType::RowsAtCompileTime,
SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1,
Options = MatrixType::Options,
MaxSize = MatrixType::MaxRowsAtCompileTime,
MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Type for vector of Householder coefficients.
*
      * This is a column vector with entries of type #Scalar. The length of the
      * vector is one less than the size of #MatrixType, if it is a fixed-size
      * type.
*/
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
/** \brief Return type of matrixQ() */
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
/** \brief Default constructor; the decomposition will be computed later.
*
* \param [in] size The size of the matrix whose Hessenberg decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size),
m_temp(size),
m_isInitialized(false)
{
if(size>1)
m_hCoeffs.resize(size-1);
}
/** \brief Constructor; computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
*
* This constructor calls compute() to compute the Hessenberg
* decomposition.
*
* \sa matrixH() for an example.
*/
template<typename InputType>
explicit HessenbergDecomposition(const EigenBase<InputType>& matrix)
: m_matrix(matrix.derived()),
m_temp(matrix.rows()),
m_isInitialized(false)
{
if(matrix.rows()<2)
{
m_isInitialized = true;
return;
}
m_hCoeffs.resize(matrix.rows()-1,1);
_compute(m_matrix, m_hCoeffs, m_temp);
m_isInitialized = true;
}
/** \brief Computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
* \returns Reference to \c *this
*
* The Hessenberg decomposition is computed by bringing the columns of the
* matrix successively in the required form using Householder reflections
* (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, <i>%Matrix
* Computations</i>). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$
* denotes the size of the given matrix.
*
* This method reuses of the allocated data in the HessenbergDecomposition
* object.
*
* Example: \include HessenbergDecomposition_compute.cpp
* Output: \verbinclude HessenbergDecomposition_compute.out
*/
template<typename InputType>
HessenbergDecomposition& compute(const EigenBase<InputType>& matrix)
{
m_matrix = matrix.derived();
if(matrix.rows()<2)
{
m_isInitialized = true;
return *this;
}
m_hCoeffs.resize(matrix.rows()-1,1);
_compute(m_matrix, m_hCoeffs, m_temp);
m_isInitialized = true;
return *this;
}
/** \brief Returns the Householder coefficients.
*
* \returns a const reference to the vector of Householder coefficients
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The Householder coefficients allow the reconstruction of the matrix
* \f$ Q \f$ in the Hessenberg decomposition from the packed data.
*
* \sa packedMatrix(), \ref Householder_Module "Householder module"
*/
const CoeffVectorType& householderCoefficients() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return m_hCoeffs;
}
/** \brief Returns the internal representation of the decomposition
*
* \returns a const reference to a matrix with the internal representation
* of the decomposition.
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The returned matrix contains the following information:
* - the upper part and lower sub-diagonal represent the Hessenberg matrix H
* - the rest of the lower part contains the Householder vectors that, combined with
* Householder coefficients returned by householderCoefficients(),
* allows to reconstruct the matrix Q as
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* Here, the matrices \f$ H_i \f$ are the Householder transformations
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
* with M the matrix returned by this function.
*
* See LAPACK for further details on this packed storage.
*
* Example: \include HessenbergDecomposition_packedMatrix.cpp
* Output: \verbinclude HessenbergDecomposition_packedMatrix.out
*
* \sa householderCoefficients()
*/
const MatrixType& packedMatrix() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return m_matrix;
}
/** \brief Reconstructs the orthogonal matrix Q in the decomposition
*
* \returns object representing the matrix Q
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* This function returns a light-weight object of template class
* HouseholderSequence. You can either apply it directly to a matrix or
* you can convert it to a matrix of type #MatrixType.
*
* \sa matrixH() for an example, class HouseholderSequence
*/
HouseholderSequenceType matrixQ() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
.setLength(m_matrix.rows() - 1)
.setShift(1);
}
/** \brief Constructs the Hessenberg matrix H in the decomposition
*
* \returns expression object representing the matrix H
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The object returned by this function constructs the Hessenberg matrix H
* when it is assigned to a matrix or otherwise evaluated. The matrix H is
* constructed from the packed matrix as returned by packedMatrix(): The
* upper part (including the subdiagonal) of the packed matrix contains
* the matrix H. It may sometimes be better to directly use the packed
* matrix instead of constructing the matrix H.
*
* Example: \include HessenbergDecomposition_matrixH.cpp
* Output: \verbinclude HessenbergDecomposition_matrixH.out
*
* \sa matrixQ(), packedMatrix()
*/
MatrixHReturnType matrixH() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return MatrixHReturnType(*this);
}
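    // Editor's sketch (not part of the original sources): the factors returned by
    // matrixQ() and matrixH() recombine to the input, A ~= Q H Q^T in the real
    // case; the matrix A and the 1e-9 tolerance are assumptions of this example.
    //
    //   Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
    //   Eigen::MatrixXd H = hd.matrixH();
    //   Eigen::MatrixXd Q = hd.matrixQ();
    //   assert((Q * H * Q.transpose() - A).norm() < 1e-9 * A.norm());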
private:
typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
typedef typename NumTraits<Scalar>::Real RealScalar;
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
protected:
MatrixType m_matrix;
CoeffVectorType m_hCoeffs;
VectorType m_temp;
bool m_isInitialized;
};
/** \internal
  * Performs a Hessenberg decomposition of \a matA in place.
  *
  * \param matA the input square matrix
  * \param hCoeffs returned Householder coefficients
  *
  * The result is written into \a matA in the packed form described in
  * packedMatrix(): the Hessenberg matrix in the upper part (including the
  * subdiagonal) and the Householder vectors below it.
  *
  * Implemented from Golub's "%Matrix Computations", algorithm 7.4.2.
*
* \sa packedMatrix()
*/
template<typename MatrixType>
void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
{
eigen_assert(matA.rows()==matA.cols());
Index n = matA.rows();
temp.resize(n);
for (Index i = 0; i<n-1; ++i)
{
// let's consider the vector v = i-th column starting at position i+1
Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
matA.col(i).coeffRef(i+1) = beta;
hCoeffs.coeffRef(i) = h;
// Apply similarity transformation to remaining columns,
// i.e., compute A = H A H'
// A = H A
matA.bottomRightCorner(remainingSize, remainingSize)
.applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));
// A = A H'
matA.rightCols(remainingSize)
.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
}
}
namespace internal {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \brief Expression type for return value of HessenbergDecomposition::matrixH()
*
* \tparam MatrixType type of matrix in the Hessenberg decomposition
*
* Objects of this type represent the Hessenberg matrix in the Hessenberg
* decomposition of some matrix. The object holds a reference to the
  * HessenbergDecomposition class until it is assigned or evaluated for
* some other reason (the reference should remain valid during the life time
* of this object). This class is the return type of
* HessenbergDecomposition::matrixH(); there is probably no other use for this
* class.
*/
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
public:
/** \brief Constructor.
*
* \param[in] hess Hessenberg decomposition
*/
HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition<MatrixType>& hess) : m_hess(hess) { }
/** \brief Hessenberg matrix in decomposition.
*
* \param[out] result Hessenberg matrix in decomposition \p hess which
* was passed to the constructor
*/
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
result = m_hess.packedMatrix();
Index n = result.rows();
if (n>2)
result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
}
Index rows() const { return m_hess.packedMatrix().rows(); }
Index cols() const { return m_hess.packedMatrix().cols(); }
protected:
const HessenbergDecomposition<MatrixType>& m_hess;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_HESSENBERGDECOMPOSITION_H
| 14,351 | 37.272 | 151 | h |
| abess | abess-master/python/include/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIXBASEEIGENVALUES_H
#define EIGEN_MATRIXBASEEIGENVALUES_H
namespace Eigen {
namespace internal {
template<typename Derived, bool IsComplex>
struct eigenvalues_selector
{
// this is the implementation for the case IsComplex = true
static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
run(const MatrixBase<Derived>& m)
{
typedef typename Derived::PlainObject PlainObject;
PlainObject m_eval(m);
return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();
}
};
template<typename Derived>
struct eigenvalues_selector<Derived, false>
{
static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
run(const MatrixBase<Derived>& m)
{
typedef typename Derived::PlainObject PlainObject;
PlainObject m_eval(m);
return EigenSolver<PlainObject>(m_eval, false).eigenvalues();
}
};
} // end namespace internal
/** \brief Computes the eigenvalues of a matrix
* \returns Column vector containing the eigenvalues.
*
* \eigenvalues_module
* This function computes the eigenvalues with the help of the EigenSolver
* class (for real matrices) or the ComplexEigenSolver class (for complex
* matrices).
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix.
*
* The SelfAdjointView class provides a better algorithm for selfadjoint
* matrices.
*
* Example: \include MatrixBase_eigenvalues.cpp
* Output: \verbinclude MatrixBase_eigenvalues.out
*
* \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(),
* SelfAdjointView::eigenvalues()
*/
template<typename Derived>
inline typename MatrixBase<Derived>::EigenvaluesReturnType
MatrixBase<Derived>::eigenvalues() const
{
typedef typename internal::traits<Derived>::Scalar Scalar;
return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
}
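// Editor's sketch: even for a real input the spectrum may be complex, hence the
// complex return type described above; the 2x2 rotation generator below is an
// assumption of this example.
//
//   Eigen::Matrix2d M;
//   M << 0, -1,
//        1,  0;
//   Eigen::Vector2cd ev = M.eigenvalues();   // (+i, -i) up to ordering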
/** \brief Computes the eigenvalues of a matrix
* \returns Column vector containing the eigenvalues.
*
* \eigenvalues_module
* This function computes the eigenvalues with the help of the
* SelfAdjointEigenSolver class. The eigenvalues are repeated according to
* their algebraic multiplicity, so there are as many eigenvalues as rows in
* the matrix.
*
* Example: \include SelfAdjointView_eigenvalues.cpp
* Output: \verbinclude SelfAdjointView_eigenvalues.out
*
* \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
*/
template<typename MatrixType, unsigned int UpLo>
inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
{
typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
PlainObject thisAsMatrix(*this);
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
}
/** \brief Computes the L2 operator norm
* \returns Operator norm of the matrix.
*
* \eigenvalues_module
* This function computes the L2 operator norm of a matrix, which is also
* known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be
* \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f]
* where the maximum is over all vectors and the norm on the right is the
* Euclidean vector norm. The norm equals the largest singular value, which is
* the square root of the largest eigenvalue of the positive semi-definite
* matrix \f$ A^*A \f$.
*
* The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed
* by SelfAdjointView::eigenvalues(), to compute the operator norm of a
* matrix. The SelfAdjointView class provides a better algorithm for
* selfadjoint matrices.
*
* Example: \include MatrixBase_operatorNorm.cpp
* Output: \verbinclude MatrixBase_operatorNorm.out
*
* \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm()
*/
template<typename Derived>
inline typename MatrixBase<Derived>::RealScalar
MatrixBase<Derived>::operatorNorm() const
{
using std::sqrt;
typename Derived::PlainObject m_eval(derived());
// FIXME if it is really guaranteed that the eigenvalues are already sorted,
// then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
return sqrt((m_eval*m_eval.adjoint())
.eval()
.template selfadjointView<Lower>()
.eigenvalues()
.maxCoeff()
);
}
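// Editor's note (hedged sketch): per the definition above, operatorNorm() matches
// the largest singular value; this cross-check assumes <Eigen/SVD> is available
// and picks arbitrary sizes and tolerance.
//
//   Eigen::MatrixXd M = Eigen::MatrixXd::Random(5,3);
//   double n1 = M.operatorNorm();
//   double n2 = M.jacobiSvd().singularValues()(0);   // largest singular value
//   assert(std::abs(n1 - n2) < 1e-9 * n2);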
/** \brief Computes the L2 operator norm
* \returns Operator norm of the matrix.
*
* \eigenvalues_module
* This function computes the L2 operator norm of a self-adjoint matrix. For a
* self-adjoint matrix, the operator norm is the largest eigenvalue.
*
* The current implementation uses the eigenvalues of the matrix, as computed
* by eigenvalues(), to compute the operator norm of the matrix.
*
* Example: \include SelfAdjointView_operatorNorm.cpp
* Output: \verbinclude SelfAdjointView_operatorNorm.out
*
* \sa eigenvalues(), MatrixBase::operatorNorm()
*/
template<typename MatrixType, unsigned int UpLo>
inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
{
return eigenvalues().cwiseAbs().maxCoeff();
}
} // end namespace Eigen
#endif
| 5,679 | 34.279503 | 96 | h |
| abess | abess-master/python/include/Eigen/src/Eigenvalues/RealQZ.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Alexey Korepanov <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REAL_QZ_H
#define EIGEN_REAL_QZ_H
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class RealQZ
*
* \brief Performs a real QZ decomposition of a pair of square matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* real QZ decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
* Given a real square matrices A and B, this class computes the real QZ
* decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
  * real orthogonal matrices, T is an upper-triangular matrix, and S is an upper
  * quasi-triangular matrix. An orthogonal matrix is a matrix whose
* inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
* matrix is a block-triangular matrix whose diagonal consists of 1-by-1
* blocks and 2-by-2 blocks where further reduction is impossible due to
* complex eigenvalues.
*
* The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
* 1x1 and 2x2 blocks on the diagonals of S and T.
*
* Call the function compute() to compute the real QZ decomposition of a
* given pair of matrices. Alternatively, you can use the
  * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
* constructor which computes the real QZ decomposition at construction
* time. Once the decomposition is computed, you can use the matrixS(),
* matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
* S, T, Q and Z in the decomposition. If computeQZ==false, some time
* is saved by not computing matrices Q and Z.
*
* Example: \include RealQZ_compute.cpp
  * Output: \verbinclude RealQZ_compute.out
*
* \note The implementation is based on the algorithm in "Matrix Computations"
* by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for
* generalized eigenvalue problems" by C.B.Moler and G.W.Stewart.
*
* \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class RealQZ
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose QZ decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) :
m_S(size, size),
m_T(size, size),
m_Q(size, size),
m_Z(size, size),
m_workspace(size*2),
m_maxIters(400),
m_isInitialized(false)
{ }
/** \brief Constructor; computes real QZ decomposition of given matrices
*
* \param[in] A Matrix A.
* \param[in] B Matrix B.
      * \param[in] computeQZ If false, Q and Z are not computed.
*
* This constructor calls compute() to compute the QZ decomposition.
*/
RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
m_S(A.rows(),A.cols()),
m_T(A.rows(),A.cols()),
m_Q(A.rows(),A.cols()),
m_Z(A.rows(),A.cols()),
m_workspace(A.rows()*2),
m_maxIters(400),
m_isInitialized(false) {
compute(A, B, computeQZ);
}
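    // Editor's sketch (not part of the original sources): after a successful
    // decomposition the factors recombine to the inputs; the matrices A, B and
    // the tolerances are assumptions of this example.
    //
    //   Eigen::RealQZ<Eigen::MatrixXd> qz(A, B);
    //   if (qz.info() == Eigen::Success) {
    //     assert((qz.matrixQ() * qz.matrixS() * qz.matrixZ() - A).norm() < 1e-9 * A.norm());
    //     assert((qz.matrixQ() * qz.matrixT() * qz.matrixZ() - B).norm() < 1e-9 * B.norm());
    //   }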
/** \brief Returns matrix Q in the QZ decomposition.
*
* \returns A const reference to the matrix Q.
*/
const MatrixType& matrixQ() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
return m_Q;
}
/** \brief Returns matrix Z in the QZ decomposition.
*
* \returns A const reference to the matrix Z.
*/
const MatrixType& matrixZ() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
return m_Z;
}
/** \brief Returns matrix S in the QZ decomposition.
*
* \returns A const reference to the matrix S.
*/
const MatrixType& matrixS() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_S;
}
    /** \brief Returns matrix T in the QZ decomposition.
     *
     * \returns A const reference to the matrix T.
*/
const MatrixType& matrixT() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_T;
}
    /** \brief Computes QZ decomposition of given matrices.
*
* \param[in] A Matrix A.
* \param[in] B Matrix B.
      * \param[in] computeQZ If false, Q and Z are not computed.
* \returns Reference to \c *this
*/
RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_info;
}
/** \brief Returns number of performed QR-like iterations.
*/
Index iterations() const
{
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_global_iter;
}
/** Sets the maximal number of iterations allowed to converge to one eigenvalue
* or decouple the problem.
*/
RealQZ& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
private:
MatrixType m_S, m_T, m_Q, m_Z;
Matrix<Scalar,Dynamic,1> m_workspace;
ComputationInfo m_info;
Index m_maxIters;
bool m_isInitialized;
bool m_computeQZ;
Scalar m_normOfT, m_normOfS;
Index m_global_iter;
typedef Matrix<Scalar,3,1> Vector3s;
typedef Matrix<Scalar,2,1> Vector2s;
typedef Matrix<Scalar,2,2> Matrix2s;
typedef JacobiRotation<Scalar> JRs;
void hessenbergTriangular();
void computeNorms();
Index findSmallSubdiagEntry(Index iu);
Index findSmallDiagEntry(Index f, Index l);
void splitOffTwoRows(Index i);
void pushDownZero(Index z, Index f, Index l);
void step(Index f, Index l, Index iter);
}; // RealQZ
/** \internal Reduces S and T to upper Hessenberg - triangular form */
template<typename MatrixType>
void RealQZ<MatrixType>::hessenbergTriangular()
{
const Index dim = m_S.cols();
// perform QR decomposition of T, overwrite T with R, save Q
HouseholderQR<MatrixType> qrT(m_T);
m_T = qrT.matrixQR();
m_T.template triangularView<StrictlyLower>().setZero();
m_Q = qrT.householderQ();
// overwrite S with Q* S
m_S.applyOnTheLeft(m_Q.adjoint());
// init Z as Identity
if (m_computeQZ)
m_Z = MatrixType::Identity(dim,dim);
// reduce S to upper Hessenberg with Givens rotations
for (Index j=0; j<=dim-3; j++) {
for (Index i=dim-1; i>=j+2; i--) {
JRs G;
// kill S(i,j)
if(m_S.coeff(i,j) != 0)
{
G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
}
// kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{
G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}
}
/** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */
template<typename MatrixType>
inline void RealQZ<MatrixType>::computeNorms()
{
const Index size = m_S.cols();
m_normOfS = Scalar(0.0);
m_normOfT = Scalar(0.0);
for (Index j = 0; j < size; ++j)
{
m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
}
}
/** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
template<typename MatrixType>
inline Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));
if (s == Scalar(0.0))
s = m_normOfS;
if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
return res;
}
/** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */
template<typename MatrixType>
inline Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
{
using std::abs;
Index res = l;
while (res >= f) {
if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
break;
res--;
}
return res;
}
/** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
template<typename MatrixType>
inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
{
using std::abs;
using std::sqrt;
const Index dim=m_S.cols();
if (abs(m_S.coeff(i+1,i))==Scalar(0))
return;
Index j = findSmallDiagEntry(i,i+1);
if (j==i-1)
{
// block of (S T^{-1})
Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
template solve<OnTheRight>(m_S.template block<2,2>(i,i));
Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
Scalar q = p*p + STi(1,0)*STi(0,1);
if (q>=0) {
Scalar z = sqrt(q);
// one QR-like iteration for ABi - lambda I
// is enough - when we know exact eigenvalue in advance,
// convergence is immediate
JRs G;
if (p>=0)
G.makeGivens(p + z, STi(1,0));
else
G.makeGivens(p - z, STi(1,0));
m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i,i+1,G);
G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i+1,i,G.adjoint());
m_S.coeffRef(i+1,i) = Scalar(0.0);
m_T.coeffRef(i+1,i) = Scalar(0.0);
}
}
else
{
pushDownZero(j,i,i+1);
}
}
/** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
template<typename MatrixType>
inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
{
JRs G;
const Index dim = m_S.cols();
for (Index zz=z; zz<l; zz++)
{
// push 0 down
Index firstColS = zz>f ? (zz-1) : zz;
G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(zz,zz+1,G);
// kill S(zz+1, zz-1)
if (zz>f)
{
G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
}
}
// finally kill S(l,l-1)
G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
m_S.applyOnTheRight(l,l-1,G);
m_T.applyOnTheRight(l,l-1,G);
m_S.coeffRef(l,l-1)=Scalar(0.0);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(l,l-1,G.adjoint());
}
/** \internal QR-like iterative step for block f..l */
template<typename MatrixType>
inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)
{
using std::abs;
const Index dim = m_S.cols();
// x, y, z
Scalar x, y, z;
if (iter==10)
{
// Wilkinson ad hoc shift
const Scalar
a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
b12=m_T.coeff(f+0,f+1),
b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
a87=m_S.coeff(l-1,l-2),
a98=m_S.coeff(l-0,l-1),
b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
Scalar ss = abs(a87*b77i) + abs(a98*b88i),
lpl = Scalar(1.5)*ss,
ll = ss*ss;
x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
- a11*a21*b12*b11i*b11i*b22i;
y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i
- a21*a21*b12*b11i*b11i*b22i;
z = a21*a32*b11i*b22i;
}
else if (iter==16)
{
// another exceptional shift
x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
(m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
z = 0;
}
else if (iter>23 && !(iter%8))
{
// extremely exceptional shift
x = internal::random<Scalar>(-1.0,1.0);
y = internal::random<Scalar>(-1.0,1.0);
z = internal::random<Scalar>(-1.0,1.0);
}
else
{
// Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
// where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where
// U and V are 2x2 bottom right sub matrices of A and B. Thus:
// = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
// = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)
// Since we are only interested in having x, y, z with a correct ratio, we have:
const Scalar
a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1),
a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1),
a32 = m_S.coeff(f+2,f+1),
a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l),
b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1),
b22 = m_T.coeff(f+1,f+1),
b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
b99 = m_T.coeff(l,l);
x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+ a12/b22 - (a11/b11)*(b12/b22);
y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
z = a32/b22;
}
JRs G;
for (Index k=f; k<=l-2; k++)
{
// variables for Householder reflections
Vector2s essential2;
Scalar tau, beta;
Vector3s hr(x,y,z);
// Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
hr.makeHouseholderInPlace(tau, beta);
essential2 = hr.template bottomRows<2>();
Index fc=(std::max)(k-1,Index(0)); // first col to update
m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
if (m_computeQZ)
m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
if (k>f)
m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
// Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
hr.makeHouseholderInPlace(tau, beta);
essential2 = hr.template bottomRows<2>();
{
Index lr = (std::min)(k+4,dim); // last row to update
Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
// S
tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
tmp += m_S.col(k+2).head(lr);
m_S.col(k+2).head(lr) -= tau*tmp;
m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
// T
tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
tmp += m_T.col(k+2).head(lr);
m_T.col(k+2).head(lr) -= tau*tmp;
m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
}
if (m_computeQZ)
{
// Z
Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
tmp += m_Z.row(k+2);
m_Z.row(k+2) -= tau*tmp;
m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
}
m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
// Z_{k2} to annihilate T(k+1,k)
G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
m_S.applyOnTheRight(k+1,k,G);
m_T.applyOnTheRight(k+1,k,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(k+1,k,G.adjoint());
m_T.coeffRef(k+1,k) = Scalar(0.0);
// update x,y,z
x = m_S.coeff(k+1,k);
y = m_S.coeff(k+2,k);
if (k < l-2)
z = m_S.coeff(k+3,k);
} // loop over k
// Q_{n-1} to annihilate y = S(l,l-2)
G.makeGivens(x,y);
m_S.applyOnTheLeft(l-1,l,G.adjoint());
m_T.applyOnTheLeft(l-1,l,G.adjoint());
if (m_computeQZ)
m_Q.applyOnTheRight(l-1,l,G);
m_S.coeffRef(l,l-2) = Scalar(0.0);
// Z_{n-1} to annihilate T(l,l-1)
G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
m_S.applyOnTheRight(l,l-1,G);
m_T.applyOnTheRight(l,l-1,G);
if (m_computeQZ)
m_Z.applyOnTheLeft(l,l-1,G.adjoint());
m_T.coeffRef(l,l-1) = Scalar(0.0);
}
template<typename MatrixType>
RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
{
const Index dim = A_in.cols();
eigen_assert (A_in.rows()==dim && A_in.cols()==dim
&& B_in.rows()==dim && B_in.cols()==dim
&& "Need square matrices of the same dimension");
m_isInitialized = true;
m_computeQZ = computeQZ;
m_S = A_in; m_T = B_in;
m_workspace.resize(dim*2);
m_global_iter = 0;
    // entry point: Hessenberg-triangular decomposition
hessenbergTriangular();
// compute L1 vector norms of T, S into m_normOfS, m_normOfT
computeNorms();
Index l = dim-1,
f,
local_iter = 0;
while (l>0 && local_iter<m_maxIters)
{
f = findSmallSubdiagEntry(l);
      // now rows and columns f..l (inclusive) decouple from the rest of the problem
if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
if (f == l) // One root found
{
l--;
local_iter = 0;
}
else if (f == l-1) // Two roots found
{
splitOffTwoRows(f);
l -= 2;
local_iter = 0;
}
else // No convergence yet
{
        // if there is a zero on the diagonal of T, we can isolate an eigenvalue with Givens rotations
Index z = findSmallDiagEntry(f,l);
if (z>=f)
{
// zero found
pushDownZero(z,f,l);
}
else
{
          // We are sure now that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg
          // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows us
          // to apply a QR-like iteration to rows and columns f..l.
step(f,l, local_iter);
local_iter++;
m_global_iter++;
}
}
}
  // check if we converged before reaching the iteration limit
m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
  // For each non-triangular 2x2 diagonal block of S,
  //    reduce the respective 2x2 diagonal block of T to positive diagonal form using 2x2 SVD.
  // This step is not mandatory for QZ, but it does help further extraction of eigenvalues/eigenvectors,
  // and is on par with LAPACK/MATLAB QZ.
if(m_info==Success)
{
for(Index i=0; i<dim-1; ++i)
{
if(m_S.coeff(i+1, i) != Scalar(0))
{
JacobiRotation<Scalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_T, i, i+1, &j_left, &j_right);
// Apply resulting Jacobi rotations
m_S.applyOnTheLeft(i,i+1,j_left);
m_S.applyOnTheRight(i,i+1,j_right);
m_T.applyOnTheLeft(i,i+1,j_left);
m_T.applyOnTheRight(i,i+1,j_right);
m_T(i+1,i) = m_T(i,i+1) = Scalar(0);
if(m_computeQZ) {
m_Q.applyOnTheRight(i,i+1,j_left.transpose());
m_Z.applyOnTheLeft(i,i+1,j_right.transpose());
}
i++;
}
}
}
return *this;
} // end compute
} // end namespace Eigen
#endif //EIGEN_REAL_QZ
| 23,586 | 35.010687 | 121 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/RealSchur.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2010,2012 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REAL_SCHUR_H
#define EIGEN_REAL_SCHUR_H
#include "./HessenbergDecomposition.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class RealSchur
*
* \brief Performs a real Schur decomposition of a square matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* real Schur decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
* Given a real square matrix A, this class computes the real Schur
* decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
* T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
* inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
* matrix is a block-triangular matrix whose diagonal consists of 1-by-1
* blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
* blocks on the diagonal of T are the same as the eigenvalues of the matrix
* A, and thus the real Schur decomposition is used in EigenSolver to compute
* the eigendecomposition of a matrix.
*
* Call the function compute() to compute the real Schur decomposition of a
* given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
* constructor which computes the real Schur decomposition at construction
* time. Once the decomposition is computed, you can use the matrixU() and
* matrixT() functions to retrieve the matrices U and T in the decomposition.
*
* The documentation of RealSchur(const MatrixType&, bool) contains an example
* of the typical use of this class.
*
* \note The implementation is adapted from
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
* Their code is based on EISPACK.
*
* \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
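 *
 * A minimal usage sketch (an editorial illustration rather than an official
 * Eigen example; it assumes <Eigen/Dense> is included and that the Eigen
 * namespace is in scope):
 * \code
 * MatrixXd A = MatrixXd::Random(6,6);
 * RealSchur<MatrixXd> schur(A);  // computes both U and T
 * MatrixXd U = schur.matrixU();
 * MatrixXd T = schur.matrixT();
 * // Up to round-off, A == U * T * U.transpose() and U is orthogonal.
 * \endcode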
*/
template<typename _MatrixType> class RealSchur
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size, size),
m_matU(size, size),
m_workspaceVector(size),
m_hess(size),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{ }
/** \brief Constructor; computes real Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
*
* This constructor calls compute() to compute the Schur decomposition.
*
* Example: \include RealSchur_RealSchur_MatrixType.cpp
* Output: \verbinclude RealSchur_RealSchur_MatrixType.out
*/
template<typename InputType>
explicit RealSchur(const EigenBase<InputType>& matrix, bool computeU = true)
: m_matT(matrix.rows(),matrix.cols()),
m_matU(matrix.rows(),matrix.cols()),
m_workspaceVector(matrix.rows()),
m_hess(matrix.rows()),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{
compute(matrix.derived(), computeU);
}
/** \brief Returns the orthogonal matrix in the Schur decomposition.
*
* \returns A const reference to the matrix U.
*
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
* member function compute(const MatrixType&, bool) has been called before
* to compute the Schur decomposition of a matrix, and \p computeU was set
* to true (the default value).
*
* \sa RealSchur(const MatrixType&, bool) for an example
*/
const MatrixType& matrixU() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
return m_matU;
}
/** \brief Returns the quasi-triangular matrix in the Schur decomposition.
*
* \returns A const reference to the matrix T.
*
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
* member function compute(const MatrixType&, bool) has been called before
* to compute the Schur decomposition of a matrix.
*
* \sa RealSchur(const MatrixType&, bool) for an example
*/
const MatrixType& matrixT() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
return m_matT;
}
/** \brief Computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
* \returns Reference to \c *this
*
* The Schur decomposition is computed by first reducing the matrix to
* Hessenberg form using the class HessenbergDecomposition. The Hessenberg
* matrix is then reduced to triangular form by performing Francis QR
* iterations with implicit double shift. The cost of computing the Schur
* decomposition depends on the number of iterations; as a rough guide, it
* may be taken to be \f$25n^3\f$ flops if \a computeU is true and
* \f$10n^3\f$ flops if \a computeU is false.
*
* Example: \include RealSchur_compute.cpp
* Output: \verbinclude RealSchur_compute.out
*
* \sa compute(const MatrixType&, bool, Index)
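      *
      * A hedged sketch of the typical call pattern (an editorial illustration,
      * not an official Eigen example; A is a square MatrixXd of size n, and the
      * iteration bound shown is an arbitrary assumption):
      * \code
      * RealSchur<MatrixXd> schur(n);    // pre-allocate for size n
      * schur.setMaxIterations(80 * n);  // optional; the default is 40 * n
      * schur.compute(A, false);         // compute T only, skip U
      * if (schur.info() != Success) return;  // NoConvergence: handle as needed
      * \endcode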
*/
template<typename InputType>
RealSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
    /** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T
     *  \param[in] matrixH Matrix in Hessenberg form H
     *  \param[in] matrixQ orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
     *  \param computeU Computes the matrix U of the Schur vectors
     *  \return Reference to \c *this
     *
     *  This routine assumes that the matrix is already reduced to Hessenberg form matrixH
     *  using either the class HessenbergDecomposition or other means.
     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
     *  When computeU is true, this routine computes the matrix U such that
     *  A = U T U^T = (QZ) T (QZ)^T = Q H Q^T where A is the initial matrix.
*
* NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix
* is not available, the user should give an identity matrix (Q.setIdentity())
*
* \sa compute(const MatrixType&, bool)
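     *
     * A hedged sketch of the intended usage (an editorial illustration, not an
     * official Eigen example):
     * \code
     * HessenbergDecomposition<MatrixXd> hess(A);
     * RealSchur<MatrixXd> schur(A.rows());
     * schur.computeFromHessenberg(hess.matrixH(), hess.matrixQ(), true);
     * // If the orthogonal factor is unavailable, pass an identity instead
     * // (H below stands for the precomputed Hessenberg matrix):
     * // MatrixXd Q = MatrixXd::Identity(A.rows(), A.rows());
     * // schur.computeFromHessenberg(H, Q, true);
     * \endcode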
*/
template<typename HessMatrixType, typename OrthMatrixType>
RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
    * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed.
*
* If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
* of the matrix.
*/
RealSchur& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_maxIters;
}
/** \brief Maximum number of iterations per row.
*
* If not otherwise specified, the maximum number of iterations is this number times the size of the
* matrix. It is currently set to 40.
*/
static const int m_maxIterationsPerRow = 40;
private:
MatrixType m_matT;
MatrixType m_matU;
ColumnVectorType m_workspaceVector;
HessenbergDecomposition<MatrixType> m_hess;
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
Index m_maxIters;
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
Index findSmallSubdiagEntry(Index iu);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
};
template<typename MatrixType>
template<typename InputType>
RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)
{
const Scalar considerAsZero = (std::numeric_limits<Scalar>::min)();
eigen_assert(matrix.cols() == matrix.rows());
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrix.rows();
Scalar scale = matrix.derived().cwiseAbs().maxCoeff();
if(scale<considerAsZero)
{
m_matT.setZero(matrix.rows(),matrix.cols());
if(computeU)
m_matU.setIdentity(matrix.rows(),matrix.cols());
m_info = Success;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
// Step 1. Reduce to Hessenberg form
m_hess.compute(matrix.derived()/scale);
// Step 2. Reduce to real Schur form
computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
m_matT *= scale;
return *this;
}
template<typename MatrixType>
template<typename HessMatrixType, typename OrthMatrixType>
RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
{
using std::abs;
m_matT = matrixH;
if(computeU)
m_matU = matrixQ;
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrixH.rows();
m_workspaceVector.resize(m_matT.cols());
Scalar* workspace = &m_workspaceVector.coeffRef(0);
// The matrix m_matT is divided in three parts.
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
  // Rows il,...,iu are the part we are working on (the active window).
// Rows iu+1,...,end are already brought in triangular form.
Index iu = m_matT.cols() - 1;
Index iter = 0; // iteration count for current eigenvalue
Index totalIter = 0; // iteration count for whole matrix
Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
if(norm!=0)
{
while (iu >= 0)
{
Index il = findSmallSubdiagEntry(iu);
// Check for convergence
if (il == iu) // One root found
{
m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
if (iu > 0)
m_matT.coeffRef(iu, iu-1) = Scalar(0);
iu--;
iter = 0;
}
else if (il == iu-1) // Two roots found
{
splitOffTwoRows(iu, computeU, exshift);
iu -= 2;
iter = 0;
}
else // No convergence yet
{
        // The firstHouseholderVector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG)
Vector3s firstHouseholderVector(0,0,0), shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1;
totalIter = totalIter + 1;
if (totalIter > maxIters) break;
Index im;
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
}
}
}
if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
/** \internal Computes and returns the L1 vector norm of T */
template<typename MatrixType>
inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
{
const Index size = m_matT.cols();
  // FIXME to be efficient the following would require a triangular reduction code
// Scalar norm = m_matT.upper().cwiseAbs().sum()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm(0);
for (Index j = 0; j < size; ++j)
norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
return norm;
}
/** \internal Looks for a single small sub-diagonal element and returns its index */
template<typename MatrixType>
inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
return res;
}
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
{
using std::sqrt;
using std::abs;
const Index size = m_matT.cols();
  // The eigenvalues of the 2x2 matrix [a b; c d] are
  // trace/2 +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4
m_matT.coeffRef(iu,iu) += exshift;
m_matT.coeffRef(iu-1,iu-1) += exshift;
if (q >= Scalar(0)) // Two real eigenvalues
{
Scalar z = sqrt(abs(q));
JacobiRotation<Scalar> rot;
if (p >= Scalar(0))
rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
else
rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
m_matT.coeffRef(iu, iu-1) = Scalar(0);
if (computeU)
m_matU.applyOnTheRight(iu-1, iu, rot);
}
if (iu > 1)
m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
}
/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
{
using std::sqrt;
using std::abs;
shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
// Wilkinson's original ad hoc shift
if (iter == 10)
{
exshift += shiftInfo.coeff(0);
for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));
shiftInfo.coeffRef(0) = Scalar(0.75) * s;
shiftInfo.coeffRef(1) = Scalar(0.75) * s;
shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30)
{
Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = s * s + shiftInfo.coeff(2);
if (s > Scalar(0))
{
s = sqrt(s);
if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
s = -s;
s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
exshift += s;
for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= s;
shiftInfo.setConstant(Scalar(0.964));
}
}
}
/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
{
using std::abs;
Vector3s& v = firstHouseholderVector; // alias to save typing
for (im = iu-2; im >= il; --im)
{
const Scalar Tmm = m_matT.coeff(im,im);
const Scalar r = shiftInfo.coeff(0) - Tmm;
const Scalar s = shiftInfo.coeff(1) - Tmm;
v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
v.coeffRef(2) = m_matT.coeff(im+2,im+1);
if (im == il) {
break;
}
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
break;
}
}
/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
{
eigen_assert(im >= il);
eigen_assert(im <= iu-2);
const Index size = m_matT.cols();
for (Index k = im; k <= iu-2; ++k)
{
bool firstIteration = (k == im);
Vector3s v;
if (firstIteration)
v = firstHouseholderVector;
else
v = m_matT.template block<3,1>(k,k-1);
Scalar tau, beta;
Matrix<Scalar, 2, 1> ess;
v.makeHouseholder(ess, tau, beta);
if (beta != Scalar(0)) // if v is not zero
{
if (firstIteration && k > il)
m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
else if (!firstIteration)
m_matT.coeffRef(k,k-1) = beta;
// These Householder transformations form the O(n^3) part of the algorithm
m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
if (computeU)
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
}
}
Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
Scalar tau, beta;
Matrix<Scalar, 1, 1> ess;
v.makeHouseholder(ess, tau, beta);
if (beta != Scalar(0)) // if v is not zero
{
m_matT.coeffRef(iu-1, iu-2) = beta;
m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
if (computeU)
m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
}
// clean up pollution due to round-off errors
for (Index i = im+2; i <= iu; ++i)
{
m_matT.coeffRef(i,i-2) = Scalar(0);
if (i > im+2)
m_matT.coeffRef(i,i-3) = Scalar(0);
}
}
} // end namespace Eigen
#endif // EIGEN_REAL_SCHUR_H
| 20,268 | 36.054845 | 159 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
 *    Real Schur decomposition, needed for real unsymmetric eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_REAL_SCHUR_LAPACKE_H
#define EIGEN_REAL_SCHUR_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SCHUR_REAL(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \
template<> template<typename InputType> inline \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \
{ \
eigen_assert(matrix.cols() == matrix.rows()); \
\
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobvs, sort='N'; \
LAPACK_##LAPACKE_PREFIX_U##_SELECT2 select = 0; \
jobvs = (computeU) ? 'V' : 'N'; \
m_matU.resize(n, n); \
lapack_int ldvs = internal::convert_index<lapack_int>(m_matU.outerStride()); \
m_matT = matrix; \
lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \
Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \
wr.resize(n, 1); wi.resize(n, 1); \
info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)wr.data(), (LAPACKE_TYPE*)wi.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \
if(info == 0) \
m_info = Success; \
else \
m_info = NoConvergence; \
\
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
\
}
EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_REAL_SCHUR_LAPACKE_H
| 3,650 | 45.807692 | 212 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
#define EIGEN_SELFADJOINTEIGENSOLVER_H
#include "./Tridiagonalization.h"
namespace Eigen {
template<typename _MatrixType>
class GeneralizedSelfAdjointEigenSolver;
namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class SelfAdjointEigenSolver
*
* \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template.
*
* A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
* matrices, this means that the matrix is symmetric: it equals its
* transpose. This class computes the eigenvalues and eigenvectors of a
* selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
* \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
* selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
* the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
* eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
* matrices, the matrix \f$ V \f$ is always invertible). This is called the
* eigendecomposition.
*
* The algorithm exploits the fact that the matrix is selfadjoint, making it
* faster and more accurate than the general purpose eigenvalue algorithms
* implemented in EigenSolver and ComplexEigenSolver.
*
* Only the \b lower \b triangular \b part of the input matrix is referenced.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
* the eigenvalues and eigenvectors at construction time. Once the eigenvalue
* and eigenvectors are computed, they can be retrieved with the eigenvalues()
* and eigenvectors() functions.
*
* The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
* contains an example of the typical use of this class.
*
* To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
* the likes, see the class GeneralizedSelfAdjointEigenSolver.
*
* \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
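 *
 * A minimal usage sketch (an editorial illustration rather than an official
 * Eigen example; it assumes <Eigen/Dense> is included):
 * \code
 * MatrixXd X = MatrixXd::Random(4,4);
 * MatrixXd A = X + X.transpose();         // symmetric, hence selfadjoint
 * SelfAdjointEigenSolver<MatrixXd> es(A);
 * if (es.info() == Success)
 * {
 *   VectorXd lambda = es.eigenvalues();   // sorted in increasing order
 *   MatrixXd V      = es.eigenvectors();  // columns are unit eigenvectors
 *   // Up to round-off, A * V == V * lambda.asDiagonal().
 * }
 * \endcode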
*/
template<typename _MatrixType> class SelfAdjointEigenSolver
{
public:
typedef _MatrixType MatrixType;
enum {
Size = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
/** \brief Real scalar type for \p _MatrixType.
*
* This is just \c Scalar if #Scalar is real (e.g., \c float or
* \c double), and the type of the real part of \c Scalar if #Scalar is
* complex.
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #RealScalar.
* The length of the vector is the size of \p _MatrixType.
*/
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
typedef Tridiagonalization<MatrixType> TridiagonalizationType;
typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType;
/** \brief Default constructor for fixed-size matrices.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use
* SelfAdjointEigenSolver(Index) for dynamic-size matrices.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
*/
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver()
: m_eivec(),
m_eivalues(),
m_subdiag(),
m_isInitialized(false)
{ }
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
*
* \param [in] size Positive integer, size of the matrix whose
* eigenvalues and eigenvectors will be computed.
*
* This constructor is useful for dynamic-size matrices, when the user
* intends to perform decompositions via compute(). The \p size
* parameter is only used as a hint. It is not an error to give a wrong
* \p size, but it may impair performance.
*
* \sa compute() for an example
*/
EIGEN_DEVICE_FUNC
explicit SelfAdjointEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_subdiag(size > 1 ? size - 1 : 1),
m_isInitialized(false)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed. Only the lower triangular part of the matrix is referenced.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
*
* This constructor calls compute(const MatrixType&, int) to compute the
* eigenvalues of the matrix \p matrix. The eigenvectors are computed if
* \p options equals #ComputeEigenvectors.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
*
* \sa compute(const MatrixType&, int)
*/
template<typename InputType>
EIGEN_DEVICE_FUNC
explicit SelfAdjointEigenSolver(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors)
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
m_isInitialized(false)
{
compute(matrix.derived(), options);
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed. Only the lower triangular part of the matrix is referenced.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of \p matrix. The eigenvalues()
* function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
* then the eigenvectors are also computed and can be retrieved by
* calling eigenvectors().
*
* This implementation uses a symmetric QR algorithm. The matrix is first
* reduced to tridiagonal form using the Tridiagonalization class. The
* tridiagonal matrix is then brought to diagonal form with implicit
* symmetric QR steps with Wilkinson shift. Details can be found in
* Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
*
* The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
* are required and \f$ 4n^3/3 \f$ if they are not required.
*
* This method reuses the memory in the SelfAdjointEigenSolver object that
* was allocated when the object was constructed, if the size of the
* matrix does not change.
*
* Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
*
* \sa SelfAdjointEigenSolver(const MatrixType&, int)
*/
template<typename InputType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver& compute(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors);
/** \brief Computes eigendecomposition of given matrix using a closed-form algorithm
*
* This is a variant of compute(const MatrixType&, int options) which
* directly solves the underlying polynomial equation.
*
* Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).
*
* This method is usually significantly faster than the QR iterative algorithm
* but it might also be less accurate. It is also worth noting that
* for 3x3 matrices it involves trigonometric operations which are
* not necessarily available for all scalar types.
*
* For the 3x3 case, we observed the following worst case relative error regarding the eigenvalues:
* - double: 1e-8
* - float: 1e-3
*
* \sa compute(const MatrixType&, int options)
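    *
    * A hedged sketch contrasting the two entry points (an editorial
    * illustration; computeDirect() only applies to fixed-size 2x2 and 3x3
    * types such as Matrix3f):
    * \code
    * Matrix3f X = Matrix3f::Random();
    * Matrix3f A = X + X.transpose();
    * SelfAdjointEigenSolver<Matrix3f> es;
    * es.computeDirect(A);  // closed-form path: fast, possibly less accurate
    * es.compute(A);        // iterative QR path: the accurate reference
    * \endcode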
*/
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);
/**
*\brief Computes the eigen decomposition from a tridiagonal symmetric matrix
*
* \param[in] diag The vector containing the diagonal of the matrix.
* \param[in] subdiag The subdiagonal of the matrix.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
* \returns Reference to \c *this
*
* This function assumes that the matrix has been reduced to tridiagonal form.
*
* \sa compute(const MatrixType&, int) for more information
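    *
    * A hedged sketch of driving the solver from a precomputed tridiagonal
    * form (an editorial illustration; diag and subdiag could equally come
    * from an external source):
    * \code
    * Tridiagonalization<MatrixXd> tri(A);  // A selfadjoint
    * VectorXd diag    = tri.diagonal();
    * VectorXd subdiag = tri.subDiagonal();
    * SelfAdjointEigenSolver<MatrixXd> es;
    * es.computeFromTridiagonal(diag, subdiag, EigenvaluesOnly);
    * \endcode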
*/
SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors);
/** \brief Returns the eigenvectors of given matrix.
*
* \returns A const reference to the matrix whose columns are the eigenvectors.
*
* \pre The eigenvectors have been computed before.
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. If
* this object was used to solve the eigenproblem for the selfadjoint
* matrix \f$ A \f$, then the matrix returned by this function is the
* matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
*
* \sa eigenvalues()
*/
EIGEN_DEVICE_FUNC
const EigenvectorsType& eigenvectors() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre The eigenvalues have been computed before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are sorted in increasing order.
*
* Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
*
* \sa eigenvectors(), MatrixBase::eigenvalues()
*/
EIGEN_DEVICE_FUNC
const RealVectorType& eigenvalues() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes the positive-definite square root of the matrix.
*
* \returns the positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* The square root of a positive-definite matrix \f$ A \f$ is the
* positive-definite matrix whose square equals \f$ A \f$. This function
* uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
* square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
*
* \sa operatorInverseSqrt(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
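    *
    * A hedged sketch (an editorial illustration; A must be a positive-definite
    * selfadjoint MatrixXd):
    * \code
    * SelfAdjointEigenSolver<MatrixXd> es(A);
    * MatrixXd sqrtA = es.operatorSqrt();
    * // Up to round-off, sqrtA * sqrtA == A.
    * \endcode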
*/
EIGEN_DEVICE_FUNC
MatrixType operatorSqrt() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
}
/** \brief Computes the inverse square root of the matrix.
*
* \returns the inverse positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
* compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
* cheaper than first computing the square root with operatorSqrt() and
* then its inverse with MatrixBase::inverse().
*
* Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
*
* \sa operatorSqrt(), MatrixBase::inverse(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
*/
EIGEN_DEVICE_FUNC
MatrixType operatorInverseSqrt() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
}
/** \brief Reports whether previous computation was successful.
*
    * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
return m_info;
}
/** \brief Maximum number of iterations.
*
* The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
* denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
*/
static const int m_maxIterations = 30;
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
ComputationInfo m_info;
bool m_isInitialized;
bool m_eigenvectorsOk;
};
namespace internal {
/** \internal
*
* \eigenvalues_module \ingroup Eigenvalues_Module
*
* Performs a QR step on a tridiagonal symmetric matrix represented as a
* pair of two vectors \a diag and \a subdiag.
*
* \param diag the diagonal part of the input selfadjoint tridiagonal matrix
* \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
* \param start starting index of the submatrix to work on
* \param end last+1 index of the submatrix to work on
* \param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0
* \param n size of the input matrix
*
 * For compilation efficiency reasons, this procedure does not use Eigen expressions
 * for its arguments.
*
* Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
* "implicit symmetric QR step with Wilkinson shift"
*/
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}
template<typename MatrixType>
template<typename InputType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const EigenBase<InputType>& a_matrix, int options)
{
check_template_parameters();
const InputType &matrix(a_matrix.derived());
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
Index n = matrix.cols();
m_eivalues.resize(n,1);
if(n==1)
{
m_eivec = matrix;
m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
if(computeEigenvectors)
m_eivec.setOnes(n,n);
m_info = Success;
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
// declare some aliases
RealVectorType& diag = m_eivalues;
EigenvectorsType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
mat = matrix.template triangularView<Lower>();
RealScalar scale = mat.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
mat.template triangularView<Lower>() /= scale;
m_subdiag.resize(n-1);
internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
  // scale back the eigenvalues
m_eivalues *= scale;
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)
{
//TODO : Add an option to scale the values beforehand
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
m_eivalues = diag;
m_subdiag = subdiag;
if (computeEigenvectors)
{
m_eivec.setIdentity(diag.size(), diag.size());
}
m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
namespace internal {
/**
* \internal
* \brief Compute the eigendecomposition from a tridiagonal matrix
*
* \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
* \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
* \param[in] maxIterations : the maximum number of iterations
* \param[in] computeEigenvectors : whether the eigenvectors have to be computed or not
* \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
* \returns \c Success or \c NoConvergence
*/
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
using std::abs;
ComputationInfo info;
typedef typename MatrixType::Scalar Scalar;
Index n = diag.size();
Index end = n-1;
Index start = 0;
Index iter = 0; // total number of iterations
typedef typename DiagType::RealScalar RealScalar;
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
while (end>0)
{
for (Index i = start; i<end; ++i)
if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
subdiag[i] = 0;
// find the largest unreduced block
while (end>0 && subdiag[end-1]==RealScalar(0))
{
end--;
}
if (end<=0)
break;
// if we spent too many iterations, we give up
iter++;
if(iter > maxIterations * n) break;
start = end - 1;
while (start>0 && subdiag[start-1]!=0)
start--;
internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
}
if (iter <= maxIterations * n)
info = Success;
else
info = NoConvergence;
// Sort eigenvalues and corresponding vectors.
// TODO make the sort optional ?
// TODO use a better sort algorithm !!
if (info == Success)
{
for (Index i = 0; i < n-1; ++i)
{
Index k;
diag.segment(i,n-i).minCoeff(&k);
if (k > 0)
{
std::swap(diag[i], diag[k+i]);
if(computeEigenvectors)
eivec.col(i).swap(eivec.col(k+i));
}
}
}
return info;
}
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
{
EIGEN_DEVICE_FUNC
static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
{ eig.compute(A,options); }
};
template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
{
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
/** \internal
* Computes the roots of the characteristic polynomial of \a m.
   * For numerical stability, m.trace() should be near zero and, to avoid over- or underflow, m should be normalized.
*/
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
EIGEN_USING_STD_MATH(sqrt)
EIGEN_USING_STD_MATH(atan2)
EIGEN_USING_STD_MATH(cos)
EIGEN_USING_STD_MATH(sin)
const Scalar s_inv3 = Scalar(1)/Scalar(3);
const Scalar s_sqrt3 = sqrt(Scalar(3));
// The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
// eigenvalues are the roots to this equation, all guaranteed to be
// real-valued, because the matrix is symmetric.
Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
Scalar c2 = m(0,0) + m(1,1) + m(2,2);
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
a_over_3 = numext::maxi(a_over_3, Scalar(0));
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
q = numext::maxi(q, Scalar(0));
// Compute the eigenvalues by solving for the roots of the polynomial.
Scalar rho = sqrt(a_over_3);
Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
// roots are already sorted, since cos is monotonically decreasing on [0, pi]
roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}
EIGEN_DEVICE_FUNC
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
using std::abs;
Index i0;
    // Find non-zero column i0 (by construction, there must exist a non-zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
// mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
// so let's save it:
representative = mat.col(i0);
Scalar n0, n1;
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
if(n0>n1) res = c0/std::sqrt(n0);
else res = c1/std::sqrt(n1);
return true;
}
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(3);
// TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
MatrixType scaledMat = mat.template selfadjointView<Lower>();
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
// compute the eigenvalues
computeRoots(scaledMat,eivals);
// compute the eigenvectors
if(computeEigenvectors)
{
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
// All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
MatrixType tmp;
tmp = scaledMat;
// Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
Index k(0), l(2);
if(d0 > d1)
{
numext::swap(k,l);
d0 = d1;
}
// Compute the eigenvector of index k
{
tmp.diagonal().array () -= eivals(k);
// By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
}
// Compute eigenvector of index l
if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
{
// If d0 is too small, then the two other eigenvalues are numerically the same,
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
            eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k); // subtract the component along eivecs.col(k) (classical Gram-Schmidt)
eivecs.col(l).normalize();
}
else
{
tmp = scaledMat;
tmp.diagonal().array () -= eivals(l);
VectorType dummy;
extract_kernel(tmp, eivecs.col(l), dummy);
}
// Compute last eigenvector from the other two
eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}
// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
}
};
// 2x2 direct eigenvalue decomposition, code from Hauke Heibel
template<typename SolverType>
struct direct_selfadjoint_eigenvalues<SolverType,2,false>
{
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
}
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
EIGEN_USING_STD_MATH(sqrt);
EIGEN_USING_STD_MATH(abs);
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(2);
MatrixType scaledMat = mat;
scaledMat.coeffRef(0,1) = mat.coeff(1,0);
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > Scalar(0))
scaledMat /= scale;
// Compute the eigenvalues
computeRoots(scaledMat,eivals);
    // compute the eigenvectors
if(computeEigenvectors)
{
if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.setIdentity();
}
else
{
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
}
}
// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
}
};
}
template<typename MatrixType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::computeDirect(const MatrixType& matrix, int options)
{
internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
return *this;
}
namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
using std::abs;
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e = subdiag[end-1];
  // Note that thanks to scaling, e^2 or td^2 cannot overflow; however, they can still
  // underflow, thus leading to inf/NaN values when using the following commented code:
  //   RealScalar e2 = numext::abs2(subdiag[end-1]);
  //   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
if(td==RealScalar(0))
mu -= abs(e);
else
{
RealScalar e2 = numext::abs2(subdiag[end-1]);
RealScalar h = numext::hypot(td,e);
if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
else mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
}
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
for (Index k = start; k < end; ++k)
{
JacobiRotation<RealScalar> rot;
rot.makeGivens(x, z);
// do T = G' T G
RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
if (k > start)
subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
x = subdiag[k];
if (k < end - 1)
{
z = -rot.s() * subdiag[k+1];
subdiag[k + 1] = rot.c() * subdiag[k+1];
}
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
{
// FIXME if StorageOrder == RowMajor this operation is not very efficient
Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
q.applyOnTheRight(k,k+1,rot);
}
}
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
| 33,674 | 37.662457 | 180 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Self-adjoint eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_SAEIGENSOLVER_LAPACKE_H
#define EIGEN_SAEIGENSOLVER_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW, LAPACKE_COLROW ) \
template<> template<typename InputType> inline \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \
{ \
eigen_assert(matrix.cols() == matrix.rows()); \
eigen_assert((options&~(EigVecMask|GenEigMask))==0 \
&& (options&EigVecMask)!=EigVecMask \
&& "invalid option parameter"); \
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, matrix_order, info; \
m_eivalues.resize(n,1); \
m_subdiag.resize(n-1); \
m_eivec = matrix; \
\
if(n==1) \
{ \
m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0)); \
if(computeEigenvectors) m_eivec.setOnes(n,n); \
m_info = Success; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
} \
\
lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \
matrix_order=LAPACKE_COLROW; \
char jobz, uplo='L'/*, range='A'*/; \
jobz = computeEigenvectors ? 'V' : 'N'; \
\
info = LAPACKE_##LAPACKE_NAME( matrix_order, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
m_info = (info==0) ? Success : NoConvergence; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
}
EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev, RowMajor, LAPACK_ROW_MAJOR)
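/* A minimal usage sketch (illustrative only; assumes Eigen is configured with
   EIGEN_USE_LAPACKE so that compute() dispatches to, e.g., LAPACKE_dsyev):

     Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
     A = (A + A.transpose()).eval();                    // make it selfadjoint
     Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
     if (es.info() == Eigen::Success)
       std::cout << es.eigenvalues().transpose() << std::endl;
*/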
} // end namespace Eigen
#endif // EIGEN_SAEIGENSOLVER_H
| 4,378 | 47.120879 | 136 |
h
|
abess
|
abess-master/python/include/Eigen/src/Eigenvalues/Tridiagonalization.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Jitse Niesen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIDIAGONALIZATION_H
#define EIGEN_TRIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
template<typename MatrixType>
struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
: public traits<typename MatrixType::PlainObject>
{
typedef typename MatrixType::PlainObject ReturnType; // FIXME shall it be a BandMatrix?
enum { Flags = 0 };
};
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class Tridiagonalization
*
* \brief Tridiagonal decomposition of a selfadjoint matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* tridiagonal decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
* This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
* \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
*
* A tridiagonal matrix is a matrix which has nonzero elements only on the
* main diagonal and the first diagonal below and above it. The Hessenberg
* decomposition of a selfadjoint matrix is in fact a tridiagonal
* decomposition. This class is used in SelfAdjointEigenSolver to compute the
* eigenvalues and eigenvectors of a selfadjoint matrix.
*
* Call the function compute() to compute the tridiagonal decomposition of a
* given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)
* constructor which computes the tridiagonal decomposition at
* construction time. Once the decomposition is computed, you can use the
* matrixQ() and matrixT() functions to retrieve the matrices Q and T in the
* decomposition.
*
* The documentation of Tridiagonalization(const MatrixType&) contains an
* example of the typical use of this class.
*
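* A minimal sketch of such a use (illustrative only):
* \code
* MatrixXd A = MatrixXd::Random(5,5);
* A = (A + A.adjoint()).eval();  // make the input selfadjoint
* Tridiagonalization<MatrixXd> tri(A);
* MatrixXd Q = tri.matrixQ();    // unitary factor
* MatrixXd T = tri.matrixT();    // real symmetric tridiagonal factor
* // A is numerically equal to Q * T * Q.adjoint()
* \endcode
*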
* \sa class HessenbergDecomposition, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class Tridiagonalization
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
Size = MatrixType::RowsAtCompileTime,
SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),
Options = MatrixType::Options,
MaxSize = MatrixType::MaxRowsAtCompileTime,
MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1)
};
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;
typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;
typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::add_const_on_value_type<typename Diagonal<const MatrixType>::RealReturnType>::type,
const Diagonal<const MatrixType>
>::type DiagonalReturnType;
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::add_const_on_value_type<typename Diagonal<const MatrixType, -1>::RealReturnType>::type,
const Diagonal<const MatrixType, -1>
>::type SubDiagonalReturnType;
/** \brief Return type of matrixQ() */
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose tridiagonal
* decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size),
m_hCoeffs(size > 1 ? size-1 : 1),
m_isInitialized(false)
{}
/** \brief Constructor; computes tridiagonal decomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
* is to be computed.
*
* This constructor calls compute() to compute the tridiagonal decomposition.
*
* Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp
* Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
*/
template<typename InputType>
explicit Tridiagonalization(const EigenBase<InputType>& matrix)
: m_matrix(matrix.derived()),
m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
m_isInitialized(false)
{
internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
m_isInitialized = true;
}
/** \brief Computes tridiagonal decomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
* is to be computed.
* \returns Reference to \c *this
*
* The tridiagonal decomposition is computed by bringing the columns of
* the matrix successively into the required form using Householder
* reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes
* the size of the given matrix.
*
* This method reuses the allocated data in the Tridiagonalization
* object, if the size of the matrix does not change.
*
* Example: \include Tridiagonalization_compute.cpp
* Output: \verbinclude Tridiagonalization_compute.out
*/
template<typename InputType>
Tridiagonalization& compute(const EigenBase<InputType>& matrix)
{
m_matrix = matrix.derived();
m_hCoeffs.resize(matrix.rows()-1, 1);
internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
m_isInitialized = true;
return *this;
}
/** \brief Returns the Householder coefficients.
*
* \returns a const reference to the vector of Householder coefficients
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* The Householder coefficients allow the reconstruction of the matrix
* \f$ Q \f$ in the tridiagonal decomposition from the packed data.
*
* Example: \include Tridiagonalization_householderCoefficients.cpp
* Output: \verbinclude Tridiagonalization_householderCoefficients.out
*
* \sa packedMatrix(), \ref Householder_Module "Householder module"
*/
inline CoeffVectorType householderCoefficients() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_hCoeffs;
}
/** \brief Returns the internal representation of the decomposition
*
* \returns a const reference to a matrix with the internal representation
* of the decomposition.
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* The returned matrix contains the following information:
* - the strict upper triangular part is equal to the input matrix A.
* - the diagonal and lower sub-diagonal represent the real tridiagonal
* symmetric matrix T.
* - the rest of the lower part contains the Householder vectors that,
* combined with Householder coefficients returned by
* householderCoefficients(), allow the matrix Q to be reconstructed as
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* Here, the matrices \f$ H_i \f$ are the Householder transformations
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
* with M the matrix returned by this function.
*
* See LAPACK for further details on this packed storage.
*
* Example: \include Tridiagonalization_packedMatrix.cpp
* Output: \verbinclude Tridiagonalization_packedMatrix.out
*
* \sa householderCoefficients()
*/
inline const MatrixType& packedMatrix() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix;
}
/** \brief Returns the unitary matrix Q in the decomposition
*
* \returns object representing the matrix Q
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* This function returns a light-weight object of template class
* HouseholderSequence. You can either apply it directly to a matrix or
* you can convert it to a matrix of type #MatrixType.
*
* \sa Tridiagonalization(const MatrixType&) for an example,
* matrixT(), class HouseholderSequence
*/
HouseholderSequenceType matrixQ() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
.setLength(m_matrix.rows() - 1)
.setShift(1);
}
/** \brief Returns an expression of the tridiagonal matrix T in the decomposition
*
* \returns expression object representing the matrix T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* Currently, this function can be used to extract the matrix T from internal
* data and copy it to a dense matrix object. In most cases, it may be
* sufficient to directly use the packed matrix or the vector expressions
* returned by diagonal() and subDiagonal() instead of creating a new
* dense copy matrix with this function.
*
* \sa Tridiagonalization(const MatrixType&) for an example,
* matrixQ(), packedMatrix(), diagonal(), subDiagonal()
*/
MatrixTReturnType matrixT() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return MatrixTReturnType(m_matrix.real());
}
/** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition.
*
* \returns expression representing the diagonal of T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* Example: \include Tridiagonalization_diagonal.cpp
* Output: \verbinclude Tridiagonalization_diagonal.out
*
* \sa matrixT(), subDiagonal()
*/
DiagonalReturnType diagonal() const;
/** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.
*
* \returns expression representing the subdiagonal of T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* \sa diagonal() for an example, matrixT()
*/
SubDiagonalReturnType subDiagonal() const;
protected:
MatrixType m_matrix;
CoeffVectorType m_hCoeffs;
bool m_isInitialized;
};
template<typename MatrixType>
typename Tridiagonalization<MatrixType>::DiagonalReturnType
Tridiagonalization<MatrixType>::diagonal() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix.diagonal().real();
}
template<typename MatrixType>
typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
Tridiagonalization<MatrixType>::subDiagonal() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix.template diagonal<-1>().real();
}
namespace internal {
/** \internal
* Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
*
* \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
* On output, the strict upper part is left unchanged, and the lower triangular part
* represents the T and Q matrices in packed format, as detailed below.
* \param[out] hCoeffs returned Householder coefficients (see below)
*
* On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
* and lower sub-diagonal of the matrix \a matA.
* The unitary matrix Q is represented in a compact way as a product of
* Householder reflectors \f$ H_i \f$ such that:
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* The Householder reflectors are defined as
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
*
* Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
*
* \sa Tridiagonalization::packedMatrix()
*/
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using numext::conj;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index n = matA.rows();
eigen_assert(n==matA.cols());
eigen_assert(n==hCoeffs.size()+1 || n==1);
for (Index i = 0; i<n-1; ++i)
{
Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
// Apply similarity transformation to remaining columns,
// i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
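// In the real case (Golub & Van Loan, alg. 8.3.1) this amounts to:
//   p = h * A * v,   w = p - (h/2) * (p^T v) * v,   A -= v w' + w v',
// which is the rank-2 update performed below.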
matA.col(i).coeffRef(i+1) = 1;
hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
* (conj(h) * matA.col(i).tail(remainingSize)));
hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
.rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));
matA.col(i).coeffRef(i+1) = beta;
hCoeffs.coeffRef(i) = h;
}
}
// forward declaration, implementation at the end of this file
template<typename MatrixType,
int Size=MatrixType::ColsAtCompileTime,
bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>
struct tridiagonalization_inplace_selector;
/** \brief Performs a full tridiagonalization in place
*
* \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal
* decomposition is to be computed. Only the lower triangular part is referenced.
* The rest is left unchanged. On output, the orthogonal matrix Q
* in the decomposition if \p extractQ is true.
* \param[out] diag The diagonal of the tridiagonal matrix T in the
* decomposition.
* \param[out] subdiag The subdiagonal of the tridiagonal matrix T in
* the decomposition.
* \param[in] extractQ If true, the orthogonal matrix Q in the
* decomposition is computed and stored in \p mat.
*
* Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
* such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
* symmetric tridiagonal matrix.
*
* The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
* \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
* part of the matrix \p mat is destroyed.
*
* The vectors \p diag and \p subdiag are not resized. The function
* assumes that they are already of the correct size. The length of the
* vector \p diag should equal the number of rows in \p mat, and the
* length of the vector \p subdiag should be one less.
*
* This implementation contains an optimized path for 3-by-3 matrices
* which is especially useful for plane fitting.
*
* \note Currently, it requires two temporary vectors to hold the intermediate
* Householder coefficients, and to reconstruct the matrix Q from the Householder
* reflectors.
*
* Example (this uses the same matrix as the example in
* Tridiagonalization::Tridiagonalization(const MatrixType&)):
* \include Tridiagonalization_decomposeInPlace.cpp
* Output: \verbinclude Tridiagonalization_decomposeInPlace.out
*
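* A minimal calling sketch (illustrative only):
* \code
* MatrixXd A = MatrixXd::Random(4,4);
* A = (A + A.adjoint()).eval();  // make A selfadjoint
* VectorXd diag(4), subdiag(3);
* internal::tridiagonalization_inplace(A, diag, subdiag, true); // A becomes Q
* \endcode
*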
* \sa class Tridiagonalization
*/
template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
}
/** \internal
* General full tridiagonalization
*/
template<typename MatrixType, int Size, bool IsComplex>
struct tridiagonalization_inplace_selector
{
typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
CoeffVectorType hCoeffs(mat.cols()-1);
tridiagonalization_inplace(mat,hCoeffs);
diag = mat.diagonal().real();
subdiag = mat.template diagonal<-1>().real();
if(extractQ)
mat = HouseholderSequenceType(mat, hCoeffs.conjugate())
.setLength(mat.rows() - 1)
.setShift(1);
}
};
/** \internal
* Specialization for 3x3 real matrices.
* Especially useful for plane fitting.
*/
template<typename MatrixType>
struct tridiagonalization_inplace_selector<MatrixType,3,false>
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
using std::sqrt;
const RealScalar tol = (std::numeric_limits<RealScalar>::min)();
diag[0] = mat(0,0);
RealScalar v1norm2 = numext::abs2(mat(2,0));
if(v1norm2 <= tol)
{
diag[1] = mat(1,1);
diag[2] = mat(2,2);
subdiag[0] = mat(1,0);
subdiag[1] = mat(2,1);
if (extractQ)
mat.setIdentity();
}
else
{
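// In effect a single symmetric orthogonal reflection
// P = [1, 0, 0; 0, m01, m02; 0, m02, -m01] brings A to tridiagonal form:
// diag/subdiag below hold P A P, and the extracted Q equals P.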
RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
RealScalar invBeta = RealScalar(1)/beta;
Scalar m01 = mat(1,0) * invBeta;
Scalar m02 = mat(2,0) * invBeta;
Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1));
diag[1] = mat(1,1) + m02*q;
diag[2] = mat(2,2) - m02*q;
subdiag[0] = beta;
subdiag[1] = mat(2,1) - m01 * q;
if (extractQ)
{
mat << 1, 0, 0,
0, m01, m02,
0, m02, -m01;
}
}
}
};
/** \internal
* Trivial specialization for 1x1 matrices
*/
template<typename MatrixType, bool IsComplex>
struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
{
typedef typename MatrixType::Scalar Scalar;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
{
diag(0,0) = numext::real(mat(0,0));
if(extractQ)
mat(0,0) = Scalar(1);
}
};
/** \internal
* \eigenvalues_module \ingroup Eigenvalues_Module
*
* \brief Expression type for return value of Tridiagonalization::matrixT()
*
* \tparam MatrixType type of underlying dense matrix
*/
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
{
public:
/** \brief Constructor.
*
* \param[in] mat The underlying dense matrix
*/
TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
result.setZero();
result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();
result.diagonal() = m_matrix.diagonal();
result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
}
Index rows() const { return m_matrix.rows(); }
Index cols() const { return m_matrix.cols(); }
protected:
typename MatrixType::Nested m_matrix;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIDIAGONALIZATION_H
| 22,444 | 39.29623 | 151 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/AlignedBox.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ALIGNEDBOX_H
#define EIGEN_ALIGNEDBOX_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
*
* \class AlignedBox
*
* \brief An axis aligned box
*
* \tparam _Scalar the type of the scalar coefficients
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
* \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
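*
* A minimal usage sketch (illustrative only):
* \code
* AlignedBox3f box;                                        // starts empty
* box.extend(Vector3f(0,0,0)).extend(Vector3f(1,2,3));
* bool inside = box.contains(Vector3f(0.5f, 1.0f, 1.5f));  // true
* \endcode
*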
* \sa alignedboxtypedefs
*/
template <typename _Scalar, int _AmbientDim>
class AlignedBox
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef NumTraits<Scalar> ScalarTraits;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename ScalarTraits::Real RealScalar;
typedef typename ScalarTraits::NonInteger NonInteger;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> VectorTypeSum;
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
enum CornerType
{
/** 1D names @{ */
Min=0, Max=1,
/** @} */
/** Identifiers for 2D corners @{ */
BottomLeft=0, BottomRight=1,
TopLeft=2, TopRight=3,
/** @} */
/** Identifiers for 3D corners @{ */
BottomLeftFloor=0, BottomRightFloor=1,
TopLeftFloor=2, TopRightFloor=3,
BottomLeftCeil=4, BottomRightCeil=5,
TopLeftCeil=6, TopRightCeil=7
/** @} */
};
/** Default constructor initializing a null box. */
EIGEN_DEVICE_FUNC inline AlignedBox()
{ if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */
EIGEN_DEVICE_FUNC inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }
/** Constructs a box with extremities \a _min and \a _max.
* \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
template<typename OtherVectorType1, typename OtherVectorType2>
EIGEN_DEVICE_FUNC inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
/** Constructs a box containing a single point \a p. */
template<typename Derived>
EIGEN_DEVICE_FUNC inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
{ }
EIGEN_DEVICE_FUNC ~AlignedBox() {}
/** \returns the dimension in which the box holds */
EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
/** \deprecated use isEmpty() */
EIGEN_DEVICE_FUNC inline bool isNull() const { return isEmpty(); }
/** \deprecated use setEmpty() */
EIGEN_DEVICE_FUNC inline void setNull() { setEmpty(); }
/** \returns true if the box is empty.
* \sa setEmpty */
EIGEN_DEVICE_FUNC inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
/** Makes \c *this an empty box.
* \sa isEmpty */
EIGEN_DEVICE_FUNC inline void setEmpty()
{
m_min.setConstant( ScalarTraits::highest() );
m_max.setConstant( ScalarTraits::lowest() );
}
/** \returns the minimal corner */
EIGEN_DEVICE_FUNC inline const VectorType& (min)() const { return m_min; }
/** \returns a non const reference to the minimal corner */
EIGEN_DEVICE_FUNC inline VectorType& (min)() { return m_min; }
/** \returns the maximal corner */
EIGEN_DEVICE_FUNC inline const VectorType& (max)() const { return m_max; }
/** \returns a non const reference to the maximal corner */
EIGEN_DEVICE_FUNC inline VectorType& (max)() { return m_max; }
/** \returns the center of the box */
EIGEN_DEVICE_FUNC inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(VectorTypeSum, RealScalar, quotient)
center() const
{ return (m_min+m_max)/RealScalar(2); }
/** \returns the lengths of the sides of the bounding box.
* Note that this function does not return the same
* result for integral and floating-point scalar types.
*/
EIGEN_DEVICE_FUNC inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const
{ return m_max - m_min; }
/** \returns the volume of the bounding box */
EIGEN_DEVICE_FUNC inline Scalar volume() const
{ return sizes().prod(); }
/** \returns an expression for the bounding box diagonal vector
* if the length of the diagonal is needed: diagonal().norm()
* will provide it.
*/
EIGEN_DEVICE_FUNC inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const
{ return sizes(); }
/** \returns the vertex of the bounding box at the corner defined by
* the corner-id \a corner. It works only for a 1D, 2D or 3D bounding box.
* For 1D bounding boxes, corners are named by 2 enum constants:
* Min and Max.
* For 2D bounding boxes, corners are named by 4 enum constants:
* BottomLeft, BottomRight, TopLeft, TopRight.
* For 3D bounding boxes, the following names are added:
* BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
*/
EIGEN_DEVICE_FUNC inline VectorType corner(CornerType corner) const
{
EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
VectorType res;
Index mult = 1;
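// The corner id is used as a bit mask: bit d of \a corner selects the maximal
// (bit set) or minimal (bit cleared) coordinate along dimension d.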
for(Index d=0; d<dim(); ++d)
{
if( mult & corner ) res[d] = m_max[d];
else res[d] = m_min[d];
mult *= 2;
}
return res;
}
/** \returns a random point inside the bounding box sampled with
* a uniform distribution */
EIGEN_DEVICE_FUNC inline VectorType sample() const
{
VectorType r(dim());
for(Index d=0; d<dim(); ++d)
{
if(!ScalarTraits::IsInteger)
{
r[d] = m_min[d] + (m_max[d]-m_min[d])
* internal::random<Scalar>(Scalar(0), Scalar(1));
}
else
r[d] = internal::random(m_min[d], m_max[d]);
}
return r;
}
/** \returns true if the point \a p is inside the box \c *this. */
template<typename Derived>
EIGEN_DEVICE_FUNC inline bool contains(const MatrixBase<Derived>& p) const
{
typename internal::nested_eval<Derived,2>::type p_n(p.derived());
return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
}
/** \returns true if the box \a b is entirely inside the box \c *this. */
EIGEN_DEVICE_FUNC inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
/** \returns true if the box \a b is intersecting the box \c *this.
* \sa intersection, clamp */
EIGEN_DEVICE_FUNC inline bool intersects(const AlignedBox& b) const
{ return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
* \sa extend(const AlignedBox&) */
template<typename Derived>
EIGEN_DEVICE_FUNC inline AlignedBox& extend(const MatrixBase<Derived>& p)
{
typename internal::nested_eval<Derived,2>::type p_n(p.derived());
m_min = m_min.cwiseMin(p_n);
m_max = m_max.cwiseMax(p_n);
return *this;
}
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
* \sa merged, extend(const MatrixBase&) */
EIGEN_DEVICE_FUNC inline AlignedBox& extend(const AlignedBox& b)
{
m_min = m_min.cwiseMin(b.m_min);
m_max = m_max.cwiseMax(b.m_max);
return *this;
}
/** Clamps \c *this by the box \a b and returns a reference to \c *this.
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersection(), intersects() */
EIGEN_DEVICE_FUNC inline AlignedBox& clamp(const AlignedBox& b)
{
m_min = m_min.cwiseMax(b.m_min);
m_max = m_max.cwiseMin(b.m_max);
return *this;
}
/** Returns an AlignedBox that is the intersection of \a b and \c *this
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersects(), clamp, contains() */
EIGEN_DEVICE_FUNC inline AlignedBox intersection(const AlignedBox& b) const
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
/** Returns an AlignedBox that is the union of \a b and \c *this.
* \note Merging with an empty box may result in a box bigger than \c *this.
* \sa extend(const AlignedBox&) */
EIGEN_DEVICE_FUNC inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
/** Translate \c *this by the vector \a t and returns a reference to \c *this. */
template<typename Derived>
EIGEN_DEVICE_FUNC inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
{
const typename internal::nested_eval<Derived,2>::type t(a_t.derived());
m_min += t;
m_max += t;
return *this;
}
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
*/
template<typename Derived>
EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
*/
EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
/** \returns the distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
*/
template<typename Derived>
EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
{ EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(p))); }
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const AlignedBox& b) const
{ EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AlignedBox,
AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
{
return typename internal::cast_return_type<AlignedBox,
AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
}
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
EIGEN_DEVICE_FUNC inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
{
m_min = (other.min)().template cast<Scalar>();
m_max = (other.max)().template cast<Scalar>();
}
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const
{ return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
protected:
VectorType m_min, m_max;
};
template<typename Scalar,int AmbientDim>
template<typename Derived>
EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
{
typename internal::nested_eval<Derived,2*AmbientDim>::type p(a_p.derived());
Scalar dist2(0);
Scalar aux;
for (Index k=0; k<dim(); ++k)
{
if( m_min[k] > p[k] )
{
aux = m_min[k] - p[k];
dist2 += aux*aux;
}
else if( p[k] > m_max[k] )
{
aux = p[k] - m_max[k];
dist2 += aux*aux;
}
}
return dist2;
}
template<typename Scalar,int AmbientDim>
EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
{
Scalar dist2(0);
Scalar aux;
for (Index k=0; k<dim(); ++k)
{
if( m_min[k] > b.m_max[k] )
{
aux = m_min[k] - b.m_max[k];
dist2 += aux*aux;
}
else if( b.m_min[k] > m_max[k] )
{
aux = b.m_min[k] - m_max[k];
dist2 += aux*aux;
}
}
return dist2;
}
/** \defgroup alignedboxtypedefs Global aligned box typedefs
*
* \ingroup Geometry_Module
*
* Eigen defines several typedef shortcuts for most common aligned box types.
*
* The general patterns are the following:
*
* \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size,
* and where \c Type can be \c i for integer, \c f for float, \c d for double.
*
* For example, \c AlignedBox3d is a fixed-size 3-dimensional aligned box of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats.
*
* \sa class AlignedBox
*/
#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
/** \ingroup alignedboxtypedefs */ \
typedef AlignedBox<Type, Size> AlignedBox##SizeSuffix##TypeSuffix;
#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i)
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f)
EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
#undef EIGEN_MAKE_TYPEDEFS
} // end namespace Eigen
#endif // EIGEN_ALIGNEDBOX_H
| 14,815 | 36.699746 | 144 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/AngleAxis.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ANGLEAXIS_H
#define EIGEN_ANGLEAXIS_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class AngleAxis
*
* \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
*
* \param _Scalar the scalar type, i.e., the type of the coefficients.
*
* \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
*
* The following two typedefs are provided for convenience:
* \li \c AngleAxisf for \c float
* \li \c AngleAxisd for \c double
*
* Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
* mimic Euler-angles. Here is an example:
* \include AngleAxis_mimic_euler.cpp
* Output: \verbinclude AngleAxis_mimic_euler.out
*
* \note This class is not aimed at storing a rotation transformation,
* but rather at making it easier to create other rotation objects
* (Quaternion, rotation Matrix) and transformation objects.
*
* \sa class Quaternion, class Transform, MatrixBase::UnitX()
*/
namespace internal {
template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
{
typedef _Scalar Scalar;
};
}
template<typename _Scalar>
class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
{
typedef RotationBase<AngleAxis<_Scalar>,3> Base;
public:
using Base::operator*;
enum { Dim = 3 };
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
typedef Matrix<Scalar,3,3> Matrix3;
typedef Matrix<Scalar,3,1> Vector3;
typedef Quaternion<Scalar> QuaternionType;
protected:
Vector3 m_axis;
Scalar m_angle;
public:
/** Default constructor without initialization. */
EIGEN_DEVICE_FUNC AngleAxis() {}
/** Constructs and initializes the angle-axis rotation from an \a angle in radians
* and an \a axis which \b must \b be \b normalized.
*
* \warning If the \a axis vector is not normalized, then the angle-axis object
* represents an invalid rotation. */
template<typename Derived>
EIGEN_DEVICE_FUNC
inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
/** Constructs and initializes the angle-axis rotation from a quaternion \a q.
* This function implicitly normalizes the quaternion \a q.
*/
template<typename QuatDerived>
EIGEN_DEVICE_FUNC inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
/** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
template<typename Derived>
EIGEN_DEVICE_FUNC inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
/** \returns the value of the rotation angle in radians */
EIGEN_DEVICE_FUNC Scalar angle() const { return m_angle; }
/** \returns a read-write reference to the stored angle in radians */
EIGEN_DEVICE_FUNC Scalar& angle() { return m_angle; }
/** \returns the rotation axis */
EIGEN_DEVICE_FUNC const Vector3& axis() const { return m_axis; }
/** \returns a read-write reference to the stored rotation axis.
*
* \warning The rotation axis must remain a \b unit vector.
*/
EIGEN_DEVICE_FUNC Vector3& axis() { return m_axis; }
/** Concatenates two rotations */
EIGEN_DEVICE_FUNC inline QuaternionType operator* (const AngleAxis& other) const
{ return QuaternionType(*this) * QuaternionType(other); }
/** Concatenates two rotations */
EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& other) const
{ return QuaternionType(*this) * other; }
/** Concatenates two rotations */
friend EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
{ return a * QuaternionType(b); }
/** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
EIGEN_DEVICE_FUNC AngleAxis inverse() const
{ return AngleAxis(-m_angle, m_axis); }
template<class QuatDerived>
EIGEN_DEVICE_FUNC AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
template<typename Derived>
EIGEN_DEVICE_FUNC AngleAxis& operator=(const MatrixBase<Derived>& m);
template<typename Derived>
EIGEN_DEVICE_FUNC AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix(void) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
{ return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
EIGEN_DEVICE_FUNC inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
{
m_axis = other.axis().template cast<Scalar>();
m_angle = Scalar(other.angle());
}
EIGEN_DEVICE_FUNC static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
};
/** \ingroup Geometry_Module
* single precision angle-axis type */
typedef AngleAxis<float> AngleAxisf;
/** \ingroup Geometry_Module
* double precision angle-axis type */
typedef AngleAxis<double> AngleAxisd;
/** Set \c *this from a \b unit quaternion.
*
* The resulting axis is normalized, and the computed angle is in the [0,pi] range.
*
* This function implicitly normalizes the quaternion \a q.
*/
template<typename Scalar>
template<typename QuatDerived>
EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
{
EIGEN_USING_STD_MATH(atan2)
EIGEN_USING_STD_MATH(abs)
Scalar n = q.vec().norm();
if(n<NumTraits<Scalar>::epsilon())
n = q.vec().stableNorm();
if (n != Scalar(0))
{
m_angle = Scalar(2)*atan2(n, abs(q.w()));
if(q.w() < 0)
n = -n;
m_axis = q.vec() / n;
}
else
{
m_angle = Scalar(0);
m_axis << Scalar(1), Scalar(0), Scalar(0);
}
return *this;
}
/** Set \c *this from a 3x3 rotation matrix \a mat.
*/
template<typename Scalar>
template<typename Derived>
EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
{
// Since a direct conversion would not really be faster,
// let's use the robust Quaternion implementation:
return *this = QuaternionType(mat);
}
/**
* \brief Sets \c *this from a 3x3 rotation matrix.
**/
template<typename Scalar>
template<typename Derived>
EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
{
return *this = QuaternionType(mat);
}
/** Constructs and \returns an equivalent 3x3 rotation matrix.
*/
template<typename Scalar>
typename AngleAxis<Scalar>::Matrix3
EIGEN_DEVICE_FUNC AngleAxis<Scalar>::toRotationMatrix(void) const
{
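// Rodrigues' formula expanded coefficient-wise:
//   R = cos(a)*I + sin(a)*[axis]_x + (1-cos(a))*axis*axis^T,
// computed below without explicitly forming the cross-product matrix [axis]_x.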
EIGEN_USING_STD_MATH(sin)
EIGEN_USING_STD_MATH(cos)
Matrix3 res;
Vector3 sin_axis = sin(m_angle) * m_axis;
Scalar c = cos(m_angle);
Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
Scalar tmp;
tmp = cos1_axis.x() * m_axis.y();
res.coeffRef(0,1) = tmp - sin_axis.z();
res.coeffRef(1,0) = tmp + sin_axis.z();
tmp = cos1_axis.x() * m_axis.z();
res.coeffRef(0,2) = tmp + sin_axis.y();
res.coeffRef(2,0) = tmp - sin_axis.y();
tmp = cos1_axis.y() * m_axis.z();
res.coeffRef(1,2) = tmp - sin_axis.x();
res.coeffRef(2,1) = tmp + sin_axis.x();
res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
return res;
}
} // end namespace Eigen
#endif // EIGEN_ANGLEAXIS_H
| 8,415 | 32.935484 | 148 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/EulerAngles.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EULERANGLES_H
#define EIGEN_EULERANGLES_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
*
* \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2)
*
* Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}.
* For instance, in:
* \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode
* "2" represents the z axis and "0" the x axis, etc. The returned angles are such that
* we have the following equality:
* \code
* mat == AngleAxisf(ea[0], Vector3f::UnitZ())
* * AngleAxisf(ea[1], Vector3f::UnitX())
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
* This corresponds to the right-multiply conventions (with right hand side frames).
*
* The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
*
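* A round-trip sketch (illustrative only):
* \code
* Matrix3f m;
* m = AngleAxisf(0.3f, Vector3f::UnitZ())
*   * AngleAxisf(0.2f, Vector3f::UnitX())
*   * AngleAxisf(0.1f, Vector3f::UnitZ());
* Vector3f ea = m.eulerAngles(2, 0, 2); // recovers {0.3, 0.2, 0.1}
* \endcode
*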
* \sa class AngleAxis
*/
template<typename Derived>
EIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
EIGEN_USING_STD_MATH(atan2)
EIGEN_USING_STD_MATH(sin)
EIGEN_USING_STD_MATH(cos)
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
Matrix<Scalar,3,1> res;
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
const Index i = a0;
const Index j = (a0 + 1 + odd)%3;
const Index k = (a0 + 2 - odd)%3;
if (a0==a2)
{
res[0] = atan2(coeff(j,i), coeff(k,i));
if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
{
if(res[0] > Scalar(0)) {
res[0] -= Scalar(EIGEN_PI);
}
else {
res[0] += Scalar(EIGEN_PI);
}
Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
res[1] = -atan2(s2, coeff(i,i));
}
else
{
Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
res[1] = atan2(s2, coeff(i,i));
}
// With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
// we can compute their respective rotation, and apply its inverse to M. Since the result must
// be a rotation around x, we have:
//
//  c2  s1.s2  c1.s2                 1   0   0
//  0   c1    -s1      *    M    =   0   c3  s3
// -s2  s1.c2  c1.c2                 0  -s3  c3
//
// Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3
Scalar s1 = sin(res[0]);
Scalar c1 = cos(res[0]);
res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
}
else
{
res[0] = atan2(coeff(j,k), coeff(k,k));
Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
if(res[0] > Scalar(0)) {
res[0] -= Scalar(EIGEN_PI);
}
else {
res[0] += Scalar(EIGEN_PI);
}
res[1] = atan2(-coeff(i,k), -c2);
}
else
res[1] = atan2(-coeff(i,k), c2);
Scalar s1 = sin(res[0]);
Scalar c1 = cos(res[0]);
res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
}
if (!odd)
res = -res;
return res;
}
} // end namespace Eigen
#endif // EIGEN_EULERANGLES_H
| 3,639 | 30.652174 | 125 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Homogeneous.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HOMOGENEOUS_H
#define EIGEN_HOMOGENEOUS_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class Homogeneous
*
* \brief Expression of one (or a set of) homogeneous vector(s)
*
* \param MatrixType the type of the object in which we are making homogeneous
*
* This class represents an expression of one (or a set of) homogeneous vector(s).
* It is the return type of MatrixBase::homogeneous() and most of the time
* this is the only way it is used.
*
* \sa MatrixBase::homogeneous()
*/
namespace internal {
template<typename MatrixType,int Direction>
struct traits<Homogeneous<MatrixType,Direction> >
: traits<MatrixType>
{
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
enum {
RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ?
int(MatrixType::RowsAtCompileTime) + 1 : Dynamic,
ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ?
int(MatrixType::ColsAtCompileTime) + 1 : Dynamic,
RowsAtCompileTime = Direction==Vertical ? RowsPlusOne : MatrixType::RowsAtCompileTime,
ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
TmpFlags = _MatrixTypeNested::Flags & HereditaryBits,
Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit)
: RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit)
: TmpFlags
};
};
template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl;
template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl;
} // end namespace internal
template<typename MatrixType,int _Direction> class Homogeneous
: public MatrixBase<Homogeneous<MatrixType,_Direction> >, internal::no_assignment_operator
{
public:
typedef MatrixType NestedExpression;
enum { Direction = _Direction };
typedef MatrixBase<Homogeneous> Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)
EIGEN_DEVICE_FUNC explicit inline Homogeneous(const MatrixType& matrix)
: m_matrix(matrix)
{}
EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; }
template<typename Rhs>
EIGEN_DEVICE_FUNC inline const Product<Homogeneous,Rhs>
operator* (const MatrixBase<Rhs>& rhs) const
{
eigen_assert(int(Direction)==Horizontal);
return Product<Homogeneous,Rhs>(*this,rhs.derived());
}
template<typename Lhs> friend
EIGEN_DEVICE_FUNC inline const Product<Lhs,Homogeneous>
operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs)
{
eigen_assert(int(Direction)==Vertical);
return Product<Lhs,Homogeneous>(lhs.derived(),rhs);
}
template<typename Scalar, int Dim, int Mode, int Options> friend
EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous >
operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
{
eigen_assert(int(Direction)==Vertical);
return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs);
}
template<typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::result_of<Func(Scalar,Scalar)>::type
redux(const Func& func) const
{
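// The symbolically appended coefficient 1 takes part in the reduction, so
// e.g. sum() on a homogeneous vector evaluates to m_matrix.sum() + 1.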
return func(m_matrix.redux(func), Scalar(1));
}
protected:
typename MatrixType::Nested m_matrix;
};
/** \geometry_module \ingroup Geometry_Module
*
* \returns a vector expression that is one longer than the vector argument, with the value 1 symbolically appended as the last coefficient.
*
* This can be used to convert affine coordinates to homogeneous coordinates.
*
* \only_for_vectors
*
* Example: \include MatrixBase_homogeneous.cpp
* Output: \verbinclude MatrixBase_homogeneous.out
*
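* A minimal sketch (illustrative only):
* \code
* Vector3d p(1.0, 2.0, 3.0);
* Vector4d ph = p.homogeneous();   // (1, 2, 3, 1)
* Vector3d q  = ph.hnormalized();  // back to (1, 2, 3)
* \endcode
*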
* \sa VectorwiseOp::homogeneous(), class Homogeneous
*/
template<typename Derived>
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::HomogeneousReturnType
MatrixBase<Derived>::homogeneous() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
return HomogeneousReturnType(derived());
}
/** \geometry_module \ingroup Geometry_Module
*
* \returns an expression where the value 1 is symbolically appended as the final coefficient to each column (or row) of the matrix.
*
* This can be used to convert affine coordinates to homogeneous coordinates.
*
* Example: \include VectorwiseOp_homogeneous.cpp
* Output: \verbinclude VectorwiseOp_homogeneous.out
*
* \sa MatrixBase::homogeneous(), class Homogeneous */
template<typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC inline Homogeneous<ExpressionType,Direction>
VectorwiseOp<ExpressionType,Direction>::homogeneous() const
{
return HomogeneousReturnType(_expression());
}
/** \geometry_module \ingroup Geometry_Module
*
* \brief homogeneous normalization
*
* \returns a vector expression of the first N-1 coefficients of \c *this divided by the last coefficient.
*
* This can be used to convert homogeneous coordinates to affine coordinates.
*
* It is essentially a shortcut for:
* \code
this->head(this->size()-1)/this->coeff(this->size()-1);
\endcode
*
* Example: \include MatrixBase_hnormalized.cpp
* Output: \verbinclude MatrixBase_hnormalized.out
*
* \sa VectorwiseOp::hnormalized() */
template<typename Derived>
EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::HNormalizedReturnType
MatrixBase<Derived>::hnormalized() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
return ConstStartMinusOne(derived(),0,0,
ColsAtCompileTime==1?size()-1:1,
ColsAtCompileTime==1?1:size()-1) / coeff(size()-1);
}
/** \geometry_module \ingroup Geometry_Module
*
* \brief column or row-wise homogeneous normalization
*
* \returns an expression of the first N-1 coefficients of each column (or row) of \c *this divided by the last coefficient of each column (or row).
*
* This can be used to convert homogeneous coordinates to affine coordinates.
*
* It is conceptually equivalent to calling MatrixBase::hnormalized() to each column (or row) of \c *this.
*
* Example: \include DirectionWise_hnormalized.cpp
* Output: \verbinclude DirectionWise_hnormalized.out
*
* \sa MatrixBase::hnormalized() */
template<typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType
VectorwiseOp<ExpressionType,Direction>::hnormalized() const
{
return HNormalized_Block(_expression(),0,0,
Direction==Vertical ? _expression().rows()-1 : _expression().rows(),
Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient(
Replicate<HNormalized_Factors,
Direction==Vertical ? HNormalized_SizeMinusOne : 1,
Direction==Horizontal ? HNormalized_SizeMinusOne : 1>
(HNormalized_Factors(_expression(),
Direction==Vertical ? _expression().rows()-1:0,
Direction==Horizontal ? _expression().cols()-1:0,
Direction==Vertical ? 1 : _expression().rows(),
Direction==Horizontal ? 1 : _expression().cols()),
Direction==Vertical ? _expression().rows()-1 : 1,
Direction==Horizontal ? _expression().cols()-1 : 1));
}
namespace internal {
template<typename MatrixOrTransformType>
struct take_matrix_for_product
{
typedef MatrixOrTransformType type;
EIGEN_DEVICE_FUNC static const type& run(const type &x) { return x; }
};
template<typename Scalar, int Dim, int Mode,int Options>
struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
{
typedef Transform<Scalar, Dim, Mode, Options> TransformType;
typedef typename internal::add_const<typename TransformType::ConstAffinePart>::type type;
EIGEN_DEVICE_FUNC static type run (const TransformType& x) { return x.affine(); }
};
template<typename Scalar, int Dim, int Options>
struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
{
typedef Transform<Scalar, Dim, Projective, Options> TransformType;
typedef typename TransformType::MatrixType type;
EIGEN_DEVICE_FUNC static const type& run (const TransformType& x) { return x.matrix(); }
};
template<typename MatrixType,typename Lhs>
struct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
{
typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType;
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
typedef typename make_proper_matrix_type<
typename traits<MatrixTypeCleaned>::Scalar,
LhsMatrixTypeCleaned::RowsAtCompileTime,
MatrixTypeCleaned::ColsAtCompileTime,
MatrixTypeCleaned::PlainObject::Options,
LhsMatrixTypeCleaned::MaxRowsAtCompileTime,
MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType;
};
template<typename MatrixType,typename Lhs>
struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
: public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
{
typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;
typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;
EIGEN_DEVICE_FUNC homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
: m_lhs(take_matrix_for_product<Lhs>::run(lhs)),
m_rhs(rhs)
{}
EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
{
// FIXME investigate how to allow lazy evaluation of this product when possible
dst = Block<const LhsMatrixTypeNested,
LhsMatrixTypeNested::RowsAtCompileTime,
LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1>
(m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs;
dst += m_lhs.col(m_lhs.cols()-1).rowwise()
.template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols());
}
typename LhsMatrixTypeCleaned::Nested m_lhs;
typename MatrixType::Nested m_rhs;
};
template<typename MatrixType,typename Rhs>
struct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
{
typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar,
MatrixType::RowsAtCompileTime,
Rhs::ColsAtCompileTime,
MatrixType::PlainObject::Options,
MatrixType::MaxRowsAtCompileTime,
Rhs::MaxColsAtCompileTime>::type ReturnType;
};
template<typename MatrixType,typename Rhs>
struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
: public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
{
typedef typename remove_all<typename Rhs::Nested>::type RhsNested;
EIGEN_DEVICE_FUNC homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs)
{}
EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const
{
// FIXME investigate how to allow lazy evaluation of this product when possible
dst = m_lhs * Block<const RhsNested,
RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1,
RhsNested::ColsAtCompileTime>
(m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols());
dst += m_rhs.row(m_rhs.rows()-1).colwise()
.template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows());
}
typename MatrixType::Nested m_lhs;
typename Rhs::Nested m_rhs;
};
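// Usage sketch of the kind of product these helpers evaluate (added for
// illustration; values are hypothetical): a dense 4x4 matrix applied to a 3D
// point via homogeneous coordinates.
//   Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   Eigen::Vector4d w = T * v.homogeneous(); // product with a Homogeneous expression
//   Eigen::Vector3d u = w.hnormalized();     // back to affine coordinates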
template<typename ArgType,int Direction>
struct evaluator_traits<Homogeneous<ArgType,Direction> >
{
typedef typename storage_kind_to_evaluator_kind<typename ArgType::StorageKind>::Kind Kind;
typedef HomogeneousShape Shape;
};
template<> struct AssignmentKind<DenseShape,HomogeneousShape> { typedef Dense2Dense Kind; };
template<typename ArgType,int Direction>
struct unary_evaluator<Homogeneous<ArgType,Direction>, IndexBased>
: evaluator<typename Homogeneous<ArgType,Direction>::PlainObject >
{
typedef Homogeneous<ArgType,Direction> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
: Base(), m_temp(op)
{
::new (static_cast<Base*>(this)) Base(m_temp);
}
protected:
PlainObject m_temp;
};
// dense = homogeneous
template< typename DstXprType, typename ArgType, typename Scalar>
struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>
{
typedef Homogeneous<ArgType,Vertical> SrcXprType;
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression();
dst.row(dst.rows()-1).setOnes();
}
};
// dense = homogeneous
template< typename DstXprType, typename ArgType, typename Scalar>
struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>
{
typedef Homogeneous<ArgType,Horizontal> SrcXprType;
EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression();
dst.col(dst.cols()-1).setOnes();
}
};
template<typename LhsArg, typename Rhs, int ProductTag>
struct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag>
{
template<typename Dest>
EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Homogeneous<LhsArg,Horizontal>& lhs, const Rhs& rhs)
{
homogeneous_right_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs>(lhs.nestedExpression(), rhs).evalTo(dst);
}
};
template<typename Lhs,typename Rhs>
struct homogeneous_right_product_refactoring_helper
{
enum {
Dim = Lhs::ColsAtCompileTime,
Rows = Lhs::RowsAtCompileTime
};
typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type LinearBlockConst;
typedef typename remove_const<LinearBlockConst>::type LinearBlock;
typedef typename Rhs::ConstRowXpr ConstantColumn;
typedef Replicate<const ConstantColumn,Rows,1> ConstantBlock;
typedef Product<Lhs,LinearBlock,LazyProduct> LinearProduct;
typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape>
: public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr>
{
typedef Product<Lhs, Rhs, LazyProduct> XprType;
typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper;
typedef typename helper::ConstantBlock ConstantBlock;
typedef typename helper::Xpr RefactoredXpr;
typedef evaluator<RefactoredXpr> Base;
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base( xpr.lhs().nestedExpression() .lazyProduct( xpr.rhs().template topRows<helper::Dim>(xpr.lhs().nestedExpression().cols()) )
+ ConstantBlock(xpr.rhs().row(xpr.rhs().rows()-1),xpr.lhs().rows(), 1) )
{}
};
template<typename Lhs, typename RhsArg, int ProductTag>
struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>
{
template<typename Dest>
EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
{
homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, Lhs>(lhs, rhs.nestedExpression()).evalTo(dst);
}
};
// TODO: the following specialization is to address a regression from 3.2 to 3.3
// In the future, this path should be optimized.
template<typename Lhs, typename RhsArg, int ProductTag>
struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, TriangularShape, HomogeneousShape, ProductTag>
{
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
{
dst.noalias() = lhs * rhs.eval();
}
};
template<typename Lhs,typename Rhs>
struct homogeneous_left_product_refactoring_helper
{
enum {
Dim = Rhs::RowsAtCompileTime,
Cols = Rhs::ColsAtCompileTime
};
typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type LinearBlockConst;
typedef typename remove_const<LinearBlockConst>::type LinearBlock;
typedef typename Lhs::ConstColXpr ConstantColumn;
typedef Replicate<const ConstantColumn,1,Cols> ConstantBlock;
typedef Product<LinearBlock,Rhs,LazyProduct> LinearProduct;
typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, HomogeneousShape>
: public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr>
{
typedef Product<Lhs, Rhs, LazyProduct> XprType;
typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper;
typedef typename helper::ConstantBlock ConstantBlock;
typedef typename helper::Xpr RefactoredXpr;
typedef evaluator<RefactoredXpr> Base;
EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
: Base( xpr.lhs().template leftCols<helper::Dim>(xpr.rhs().nestedExpression().rows()) .lazyProduct( xpr.rhs().nestedExpression() )
+ ConstantBlock(xpr.lhs().col(xpr.lhs().cols()-1),1,xpr.rhs().cols()) )
{}
};
template<typename Scalar, int Dim, int Mode,int Options, typename RhsArg, int ProductTag>
struct generic_product_impl<Transform<Scalar,Dim,Mode,Options>, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>
{
typedef Transform<Scalar,Dim,Mode,Options> TransformType;
template<typename Dest>
EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const TransformType& lhs, const Homogeneous<RhsArg,Vertical>& rhs)
{
homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, TransformType>(lhs, rhs.nestedExpression()).evalTo(dst);
}
};
template<typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, HomogeneousShape>
: public permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape>
{};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_HOMOGENEOUS_H
// abess-master/python/include/Eigen/src/Geometry/Hyperplane.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HYPERPLANE_H
#define EIGEN_HYPERPLANE_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class Hyperplane
*
* \brief A hyperplane
*
* A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
* For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
* Notice that the dimension of the hyperplane is _AmbientDim-1.
*
 * This class represents a hyperplane as the zero set of the implicit equation
* \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
* and \f$ d \f$ is the distance (offset) to the origin.
*/
template <typename _Scalar, int _AmbientDim, int _Options>
class Hyperplane
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
enum {
AmbientDimAtCompileTime = _AmbientDim,
Options = _Options
};
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
? Dynamic
: Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
/** Default constructor without initialization */
EIGEN_DEVICE_FUNC inline Hyperplane() {}
template<int OtherOptions>
EIGEN_DEVICE_FUNC Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
: m_coeffs(other.coeffs())
{}
/** Constructs a dynamic-size hyperplane with \a _dim the dimension
* of the ambient space */
EIGEN_DEVICE_FUNC inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
  /** Constructs a plane from its normal \a n and a point \a e on the plane.
    * \warning the normal vector \a n is assumed to be normalized.
*/
EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const VectorType& e)
: m_coeffs(n.size()+1)
{
normal() = n;
offset() = -n.dot(e);
}
/** Constructs a plane from its normal \a n and distance to the origin \a d
* such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
    * \warning the normal vector \a n is assumed to be normalized.
*/
EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const Scalar& d)
: m_coeffs(n.size()+1)
{
normal() = n;
offset() = d;
}
  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
*/
EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
{
Hyperplane result(p0.size());
result.normal() = (p1 - p0).unitOrthogonal();
result.offset() = -p0.dot(result.normal());
return result;
}
/** Constructs a hyperplane passing through the three points. The dimension of the ambient space
* is required to be exactly 3.
*/
EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
Hyperplane result(p0.size());
VectorType v0(p2 - p0), v1(p1 - p0);
result.normal() = v0.cross(v1);
RealScalar norm = result.normal().norm();
if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
{
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
result.normal() = svd.matrixV().col(2);
}
else
result.normal() /= norm;
result.offset() = -p0.dot(result.normal());
return result;
}
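  // Usage sketch (added for illustration; the points are hypothetical):
  // building the z = 0 plane in 3-space from three points and querying a
  // distance.
  //   typedef Eigen::Hyperplane<double,3> Plane;
  //   Plane plane = Plane::Through(Eigen::Vector3d(0,0,0),
  //                                Eigen::Vector3d(1,0,0),
  //                                Eigen::Vector3d(0,1,0));
  //   double d = plane.absDistance(Eigen::Vector3d(0,0,2)); // 2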
/** Constructs a hyperplane passing through the parametrized line \a parametrized.
    * If the dimension of the ambient space is greater than 2, the hyperplane is not uniquely determined,
* so an arbitrary choice is made.
*/
  // FIXME to be consistent with the rest this could be implemented as a static Through function ?
EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
{
normal() = parametrized.direction().unitOrthogonal();
offset() = -parametrized.origin().dot(normal());
}
EIGEN_DEVICE_FUNC ~Hyperplane() {}
  /** \returns the dimension of the ambient space in which the plane lives */
EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
/** normalizes \c *this */
EIGEN_DEVICE_FUNC void normalize(void)
{
m_coeffs /= normal().norm();
}
/** \returns the signed distance between the plane \c *this and a point \a p.
* \sa absDistance()
*/
EIGEN_DEVICE_FUNC inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
/** \returns the absolute distance between the plane \c *this and a point \a p.
* \sa signedDistance()
*/
EIGEN_DEVICE_FUNC inline Scalar absDistance(const VectorType& p) const { return numext::abs(signedDistance(p)); }
/** \returns the projection of a point \a p onto the plane \c *this.
*/
EIGEN_DEVICE_FUNC inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
/** \returns a constant reference to the unit normal vector of the plane, which corresponds
* to the linear part of the implicit equation.
*/
EIGEN_DEVICE_FUNC inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
/** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
* to the linear part of the implicit equation.
*/
EIGEN_DEVICE_FUNC inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
/** \returns the distance to the origin, which is also the "constant term" of the implicit equation
    * \warning the normal vector is assumed to be normalized.
*/
EIGEN_DEVICE_FUNC inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
/** \returns a non-constant reference to the distance to the origin, which is also the constant part
* of the implicit equation */
EIGEN_DEVICE_FUNC inline Scalar& offset() { return m_coeffs(dim()); }
/** \returns a constant reference to the coefficients c_i of the plane equation:
* \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
*/
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
/** \returns a non-constant reference to the coefficients c_i of the plane equation:
* \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
*/
EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }
/** \returns the intersection of *this with \a other.
*
* \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
*
* \note If \a other is approximately parallel to *this, this method will return any point on *this.
*/
EIGEN_DEVICE_FUNC VectorType intersection(const Hyperplane& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
    // since the line equations a*x + b*y + c = 0 are normalized with a^2+b^2=1, the following tests
// whether the two lines are approximately parallel.
if(internal::isMuchSmallerThan(det, Scalar(1)))
{ // special case where the two lines are approximately parallel. Pick any point on the first line.
if(numext::abs(coeffs().coeff(1))>numext::abs(coeffs().coeff(0)))
return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
else
return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
}
else
{ // general case
Scalar invdet = Scalar(1) / det;
return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
}
}
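  // Usage sketch (added for illustration; the coefficients are hypothetical):
  // in a 2D ambient space hyperplanes are lines, and intersection() solves
  // the corresponding 2x2 system.
  //   typedef Eigen::Hyperplane<double,2> Line2;
  //   Line2 a(Eigen::Vector2d(0,1), 0.0);    // the x-axis: y = 0
  //   Line2 b(Eigen::Vector2d(1,0), -1.0);   // the vertical line x = 1
  //   Eigen::Vector2d x = a.intersection(b); // (1, 0)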
/** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
*
* \param mat the Dim x Dim transformation matrix
* \param traits specifies whether the matrix \a mat represents an #Isometry
* or a more generic #Affine transformation. The default is #Affine.
*/
template<typename XprType>
EIGEN_DEVICE_FUNC inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
{
if (traits==Affine)
{
normal() = mat.inverse().transpose() * normal();
m_coeffs /= normal().norm();
}
else if (traits==Isometry)
normal() = mat * normal();
else
{
eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
}
return *this;
}
/** Applies the transformation \a t to \c *this and returns a reference to \c *this.
*
* \param t the transformation of dimension Dim
* \param traits specifies whether the transformation \a t represents an #Isometry
* or a more generic #Affine transformation. The default is #Affine.
* Other kind of transformations are not supported.
*/
template<int TrOptions>
EIGEN_DEVICE_FUNC inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
TransformTraits traits = Affine)
{
transform(t.linear(), traits);
offset() -= normal().dot(t.translation());
return *this;
}
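  // Usage sketch (added for illustration; the transform is hypothetical): for
  // a rigid motion, passing Eigen::Isometry skips the inverse-transpose of the
  // linear part.
  //   Eigen::Hyperplane<double,3> plane(Eigen::Vector3d::UnitZ(), 0.0);
  //   Eigen::Transform<double,3,Eigen::Affine> t(Eigen::Translation3d(0,0,1));
  //   plane.transform(t, Eigen::Isometry); // plane becomes z = 1, i.e. offset() == -1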
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Hyperplane,
Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
{
return typename internal::cast_return_type<Hyperplane,
Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
}
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType,int OtherOptions>
EIGEN_DEVICE_FUNC inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
{ m_coeffs = other.coeffs().template cast<Scalar>(); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
template<int OtherOptions>
EIGEN_DEVICE_FUNC bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }
protected:
Coefficients m_coeffs;
};
} // end namespace Eigen
#endif // EIGEN_HYPERPLANE_H
// abess-master/python/include/Eigen/src/Geometry/OrthoMethods.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ORTHOMETHODS_H
#define EIGEN_ORTHOMETHODS_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \returns the cross product of \c *this and \a other
*
* Here is a very good explanation of cross-product: http://xkcd.com/199/
*
* With complex numbers, the cross product is implemented as
* \f$ (\mathbf{a}+i\mathbf{b}) \times (\mathbf{c}+i\mathbf{d}) = (\mathbf{a} \times \mathbf{c} - \mathbf{b} \times \mathbf{d}) - i(\mathbf{a} \times \mathbf{d} - \mathbf{b} \times \mathbf{c})\f$
*
* \sa MatrixBase::cross3()
*/
template<typename Derived>
template<typename OtherDerived>
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
#else
inline typename MatrixBase<Derived>::PlainObject
#endif
MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
// Note that there is no need for an expression here since the compiler
// optimize such a small temporary very well (even within a complex expression)
typename internal::nested_eval<Derived,2>::type lhs(derived());
typename internal::nested_eval<OtherDerived,2>::type rhs(other.derived());
return typename cross_product_return_type<OtherDerived>::type(
numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
);
}
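// Usage sketch (added for illustration): the standard right-handed basis
// relation.
//   Eigen::Vector3d x = Eigen::Vector3d::UnitX();
//   Eigen::Vector3d y = Eigen::Vector3d::UnitY();
//   Eigen::Vector3d z = x.cross(y); // (0, 0, 1)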
namespace internal {
template< int Arch,typename VectorLhs,typename VectorRhs,
typename Scalar = typename VectorLhs::Scalar,
bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)>
struct cross3_impl {
EIGEN_DEVICE_FUNC static inline typename internal::plain_matrix_type<VectorLhs>::type
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
return typename internal::plain_matrix_type<VectorLhs>::type(
numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
0
);
}
};
}
/** \geometry_module \ingroup Geometry_Module
*
* \returns the cross product of \c *this and \a other using only the x, y, and z coefficients
*
* The size of \c *this and \a other must be four. This function is especially useful
* when using 4D vectors instead of 3D ones to get advantage of SSE/AltiVec vectorization.
*
* \sa MatrixBase::cross()
*/
template<typename Derived>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)
typedef typename internal::nested_eval<Derived,2>::type DerivedNested;
typedef typename internal::nested_eval<OtherDerived,2>::type OtherDerivedNested;
DerivedNested lhs(derived());
OtherDerivedNested rhs(other.derived());
return internal::cross3_impl<Architecture::Target,
typename internal::remove_all<DerivedNested>::type,
typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);
}
/** \geometry_module \ingroup Geometry_Module
*
* \returns a matrix expression of the cross product of each column or row
* of the referenced expression with the \a other vector.
*
* The referenced matrix must have one dimension equal to 3.
 * The result matrix has the same dimensions as the referenced one.
*
* \sa MatrixBase::cross() */
template<typename ExpressionType, int Direction>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
const typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType
VectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
typename internal::nested_eval<ExpressionType,2>::type mat(_expression());
typename internal::nested_eval<OtherDerived,2>::type vec(other.derived());
CrossReturnType res(_expression().rows(),_expression().cols());
if(Direction==Vertical)
{
eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows");
res.row(0) = (mat.row(1) * vec.coeff(2) - mat.row(2) * vec.coeff(1)).conjugate();
res.row(1) = (mat.row(2) * vec.coeff(0) - mat.row(0) * vec.coeff(2)).conjugate();
res.row(2) = (mat.row(0) * vec.coeff(1) - mat.row(1) * vec.coeff(0)).conjugate();
}
else
{
eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns");
res.col(0) = (mat.col(1) * vec.coeff(2) - mat.col(2) * vec.coeff(1)).conjugate();
res.col(1) = (mat.col(2) * vec.coeff(0) - mat.col(0) * vec.coeff(2)).conjugate();
res.col(2) = (mat.col(0) * vec.coeff(1) - mat.col(1) * vec.coeff(0)).conjugate();
}
return res;
}
namespace internal {
template<typename Derived, int Size = Derived::SizeAtCompileTime>
struct unitOrthogonal_selector
{
typedef typename plain_matrix_type<Derived>::type VectorType;
typedef typename traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar,2,1> Vector2;
EIGEN_DEVICE_FUNC
static inline VectorType run(const Derived& src)
{
VectorType perp = VectorType::Zero(src.size());
Index maxi = 0;
Index sndi = 0;
src.cwiseAbs().maxCoeff(&maxi);
if (maxi==0)
sndi = 1;
RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm;
return perp;
}
};
template<typename Derived>
struct unitOrthogonal_selector<Derived,3>
{
typedef typename plain_matrix_type<Derived>::type VectorType;
typedef typename traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_DEVICE_FUNC
static inline VectorType run(const Derived& src)
{
VectorType perp;
/* Let us compute the crossed product of *this with a vector
     * that is not too close to being collinear to *this.
*/
/* unless the x and y coords are both close to zero, we can
* simply take ( -y, x, 0 ) and normalize it.
*/
if((!isMuchSmallerThan(src.x(), src.z()))
|| (!isMuchSmallerThan(src.y(), src.z())))
{
RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
perp.coeffRef(0) = -numext::conj(src.y())*invnm;
perp.coeffRef(1) = numext::conj(src.x())*invnm;
perp.coeffRef(2) = 0;
}
/* if both x and y are close to zero, then the vector is close
     * to the z-axis, so it's far from collinear to the x-axis for instance.
* So we take the crossed product with (1,0,0) and normalize it.
*/
else
{
RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
perp.coeffRef(0) = 0;
perp.coeffRef(1) = -numext::conj(src.z())*invnm;
perp.coeffRef(2) = numext::conj(src.y())*invnm;
}
return perp;
}
};
template<typename Derived>
struct unitOrthogonal_selector<Derived,2>
{
typedef typename plain_matrix_type<Derived>::type VectorType;
EIGEN_DEVICE_FUNC
static inline VectorType run(const Derived& src)
{ return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }
};
} // end namespace internal
/** \geometry_module \ingroup Geometry_Module
*
* \returns a unit vector which is orthogonal to \c *this
*
* The size of \c *this must be at least 2. If the size is exactly 2,
 * then the returned vector is a counterclockwise rotation of \c *this, i.e., (-y,x).normalized().
*
* \sa cross()
*/
template<typename Derived>
EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::unitOrthogonal() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return internal::unitOrthogonal_selector<Derived>::run(derived());
}
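// Usage sketch (added for illustration; variable names are hypothetical):
// completing a right-handed orthonormal basis around a given direction.
//   Eigen::Vector3d n = Eigen::Vector3d(1,2,3).normalized();
//   Eigen::Vector3d u = n.unitOrthogonal(); // unit length, n.dot(u) == 0
//   Eigen::Vector3d v = n.cross(u);         // third basis vector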
} // end namespace Eigen
#endif // EIGEN_ORTHOMETHODS_H
// abess-master/python/include/Eigen/src/Geometry/ParametrizedLine.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARAMETRIZEDLINE_H
#define EIGEN_PARAMETRIZEDLINE_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class ParametrizedLine
*
* \brief A parametrized line
*
* A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
* direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
* the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*/
template <typename _Scalar, int _AmbientDim, int _Options>
class ParametrizedLine
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
enum {
AmbientDimAtCompileTime = _AmbientDim,
Options = _Options
};
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
/** Default constructor without initialization */
EIGEN_DEVICE_FUNC inline ParametrizedLine() {}
template<int OtherOptions>
EIGEN_DEVICE_FUNC ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
: m_origin(other.origin()), m_direction(other.direction())
{}
/** Constructs a dynamic-size line with \a _dim the dimension
* of the ambient space */
EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
/** Initializes a parametrized line of direction \a direction and origin \a origin.
    * \warning the direction vector is assumed to be normalized.
*/
EIGEN_DEVICE_FUNC ParametrizedLine(const VectorType& origin, const VectorType& direction)
: m_origin(origin), m_direction(direction) {}
template <int OtherOptions>
EIGEN_DEVICE_FUNC explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
/** Constructs a parametrized line going from \a p0 to \a p1. */
EIGEN_DEVICE_FUNC static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
{ return ParametrizedLine(p0, (p1-p0).normalized()); }
EIGEN_DEVICE_FUNC ~ParametrizedLine() {}
  /** \returns the dimension of the ambient space in which the line lives */
EIGEN_DEVICE_FUNC inline Index dim() const { return m_direction.size(); }
EIGEN_DEVICE_FUNC const VectorType& origin() const { return m_origin; }
EIGEN_DEVICE_FUNC VectorType& origin() { return m_origin; }
EIGEN_DEVICE_FUNC const VectorType& direction() const { return m_direction; }
EIGEN_DEVICE_FUNC VectorType& direction() { return m_direction; }
/** \returns the squared distance of a point \a p to its projection onto the line \c *this.
* \sa distance()
*/
EIGEN_DEVICE_FUNC RealScalar squaredDistance(const VectorType& p) const
{
VectorType diff = p - origin();
return (diff - direction().dot(diff) * direction()).squaredNorm();
}
/** \returns the distance of a point \a p to its projection onto the line \c *this.
* \sa squaredDistance()
*/
EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(squaredDistance(p)); }
/** \returns the projection of a point \a p onto the line \c *this. */
EIGEN_DEVICE_FUNC VectorType projection(const VectorType& p) const
{ return origin() + direction().dot(p-origin()) * direction(); }
EIGEN_DEVICE_FUNC VectorType pointAt(const Scalar& t) const;
template <int OtherOptions>
EIGEN_DEVICE_FUNC Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
template <int OtherOptions>
EIGEN_DEVICE_FUNC Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
template <int OtherOptions>
EIGEN_DEVICE_FUNC VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<ParametrizedLine,
ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
{
return typename internal::cast_return_type<ParametrizedLine,
ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
}
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType,int OtherOptions>
EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
{
m_origin = other.origin().template cast<Scalar>();
m_direction = other.direction().template cast<Scalar>();
}
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const ParametrizedLine& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
protected:
VectorType m_origin, m_direction;
};
/** Constructs a parametrized line from a 2D hyperplane
*
 * \warning the ambient space must have dimension 2 so that the hyperplane actually describes a line
*/
template <typename _Scalar, int _AmbientDim, int _Options>
template <int OtherOptions>
EIGEN_DEVICE_FUNC inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
direction() = hyperplane.normal().unitOrthogonal();
origin() = -hyperplane.normal()*hyperplane.offset();
}
/** \returns the point at \a t along this line
*/
template <typename _Scalar, int _AmbientDim, int _Options>
EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const
{
return origin() + (direction()*t);
}
/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane
*/
template <typename _Scalar, int _AmbientDim, int _Options>
template <int OtherOptions>
EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
{
return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
/ hyperplane.normal().dot(direction());
}
/** \deprecated use intersectionParameter()
* \returns the parameter value of the intersection between \c *this and the given \a hyperplane
*/
template <typename _Scalar, int _AmbientDim, int _Options>
template <int OtherOptions>
EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
{
return intersectionParameter(hyperplane);
}
/** \returns the point of the intersection between \c *this and the given hyperplane
*/
template <typename _Scalar, int _AmbientDim, int _Options>
template <int OtherOptions>
EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
{
return pointAt(intersectionParameter(hyperplane));
}
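// Usage sketch (added for illustration; the geometry is hypothetical):
// casting a ray against a plane.
//   Eigen::ParametrizedLine<double,3> ray(Eigen::Vector3d(0,0,2),     // origin
//                                         -Eigen::Vector3d::UnitZ()); // unit direction
//   Eigen::Hyperplane<double,3> ground(Eigen::Vector3d::UnitZ(), 0.0); // the plane z = 0
//   double t = ray.intersectionParameter(ground);        // 2
//   Eigen::Vector3d hit = ray.intersectionPoint(ground); // (0, 0, 0)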
} // end namespace Eigen
#endif // EIGEN_PARAMETRIZEDLINE_H
// abess-master/python/include/Eigen/src/Geometry/Quaternion.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Mathieu Gautier <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_QUATERNION_H
#define EIGEN_QUATERNION_H
namespace Eigen {
/***************************************************************************
* Definition of QuaternionBase<Derived>
* The implementation is at the end of the file
***************************************************************************/
namespace internal {
template<typename Other,
int OtherRows=Other::RowsAtCompileTime,
int OtherCols=Other::ColsAtCompileTime>
struct quaternionbase_assign_impl;
}
/** \geometry_module \ingroup Geometry_Module
* \class QuaternionBase
* \brief Base class for quaternion expressions
* \tparam Derived derived type (CRTP)
* \sa class Quaternion
*/
template<class Derived>
class QuaternionBase : public RotationBase<Derived, 3>
{
public:
typedef RotationBase<Derived, 3> Base;
using Base::operator*;
using Base::derived;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename internal::traits<Derived>::Coefficients Coefficients;
enum {
Flags = Eigen::internal::traits<Derived>::Flags
};
// typedef typename Matrix<Scalar,4,1> Coefficients;
/** the type of a 3D vector */
typedef Matrix<Scalar,3,1> Vector3;
/** the equivalent rotation matrix type */
typedef Matrix<Scalar,3,3> Matrix3;
/** the equivalent angle-axis type */
typedef AngleAxis<Scalar> AngleAxisType;
/** \returns the \c x coefficient */
EIGEN_DEVICE_FUNC inline Scalar x() const { return this->derived().coeffs().coeff(0); }
/** \returns the \c y coefficient */
EIGEN_DEVICE_FUNC inline Scalar y() const { return this->derived().coeffs().coeff(1); }
/** \returns the \c z coefficient */
EIGEN_DEVICE_FUNC inline Scalar z() const { return this->derived().coeffs().coeff(2); }
/** \returns the \c w coefficient */
EIGEN_DEVICE_FUNC inline Scalar w() const { return this->derived().coeffs().coeff(3); }
/** \returns a reference to the \c x coefficient */
EIGEN_DEVICE_FUNC inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
/** \returns a reference to the \c y coefficient */
EIGEN_DEVICE_FUNC inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
/** \returns a reference to the \c z coefficient */
EIGEN_DEVICE_FUNC inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
/** \returns a reference to the \c w coefficient */
EIGEN_DEVICE_FUNC inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
/** \returns a read-only vector expression of the imaginary part (x,y,z) */
EIGEN_DEVICE_FUNC inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
/** \returns a vector expression of the imaginary part (x,y,z) */
EIGEN_DEVICE_FUNC inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }
/** \returns a read-only vector expression of the coefficients (x,y,z,w) */
EIGEN_DEVICE_FUNC inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
/** \returns a vector expression of the coefficients (x,y,z,w) */
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);
template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);
// disabled this copy operator as it is giving very strange compilation errors when compiling
// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's
// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase
// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.
// Derived& operator=(const QuaternionBase& other)
// { return operator=<Derived>(other); }
EIGEN_DEVICE_FUNC Derived& operator=(const AngleAxisType& aa);
template<class OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const MatrixBase<OtherDerived>& m);
/** \returns a quaternion representing an identity rotation
* \sa MatrixBase::Identity()
*/
EIGEN_DEVICE_FUNC static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }
/** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
*/
EIGEN_DEVICE_FUNC inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }
/** \returns the squared norm of the quaternion's coefficients
* \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
*/
EIGEN_DEVICE_FUNC inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }
/** \returns the norm of the quaternion's coefficients
* \sa QuaternionBase::squaredNorm(), MatrixBase::norm()
*/
EIGEN_DEVICE_FUNC inline Scalar norm() const { return coeffs().norm(); }
/** Normalizes the quaternion \c *this
* \sa normalized(), MatrixBase::normalize() */
EIGEN_DEVICE_FUNC inline void normalize() { coeffs().normalize(); }
/** \returns a normalized copy of \c *this
* \sa normalize(), MatrixBase::normalized() */
EIGEN_DEVICE_FUNC inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); }
/** \returns the dot product of \c *this and \a other
* Geometrically speaking, the dot product of two unit quaternions
* corresponds to the cosine of half the angle between the two rotations.
* \sa angularDistance()
*/
template<class OtherDerived> EIGEN_DEVICE_FUNC inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }
template<class OtherDerived> EIGEN_DEVICE_FUNC Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
/** \returns an equivalent 3x3 rotation matrix */
EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix() const;
  /** \returns the quaternion which transforms \a a into \a b through a rotation */
template<typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;
template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);
/** \returns the quaternion describing the inverse rotation */
EIGEN_DEVICE_FUNC Quaternion<Scalar> inverse() const;
/** \returns the conjugated quaternion */
EIGEN_DEVICE_FUNC Quaternion<Scalar> conjugate() const;
template<class OtherDerived> EIGEN_DEVICE_FUNC Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
template<class OtherDerived>
EIGEN_DEVICE_FUNC bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return coeffs().isApprox(other.coeffs(), prec); }
  /** \returns the image of the vector \a v under the rotation */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
{
return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
}
#ifdef EIGEN_QUATERNIONBASE_PLUGIN
# include EIGEN_QUATERNIONBASE_PLUGIN
#endif
};
/***************************************************************************
* Definition/implementation of Quaternion<Scalar>
***************************************************************************/
/** \geometry_module \ingroup Geometry_Module
*
* \class Quaternion
*
* \brief The quaternion class used to represent 3D orientations and rotations
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
*
* This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
* orientations and rotations of objects in three dimensions. Compared to other representations
* like Euler angles or 3x3 matrices, quaternions offer the following advantages:
* \li \b compact storage (4 scalars)
* \li \b efficient to compose (28 flops),
* \li \b stable spherical interpolation
*
* The following two typedefs are provided for convenience:
* \li \c Quaternionf for \c float
* \li \c Quaterniond for \c double
*
* \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized.
*
* \sa class AngleAxis, class Transform
*/
namespace internal {
template<typename _Scalar,int _Options>
struct traits<Quaternion<_Scalar,_Options> >
{
typedef Quaternion<_Scalar,_Options> PlainObject;
typedef _Scalar Scalar;
typedef Matrix<_Scalar,4,1,_Options> Coefficients;
enum{
Alignment = internal::traits<Coefficients>::Alignment,
Flags = LvalueBit
};
};
}
template<typename _Scalar, int _Options>
class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
{
public:
typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
enum { NeedsAlignment = internal::traits<Quaternion>::Alignment>0 };
typedef _Scalar Scalar;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
using Base::operator*=;
typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
typedef typename Base::AngleAxisType AngleAxisType;
/** Default constructor leaving the quaternion uninitialized. */
EIGEN_DEVICE_FUNC inline Quaternion() {}
/** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
* its four coefficients \a w, \a x, \a y and \a z.
*
* \warning Note the order of the arguments: the real \a w coefficient first,
* while internally the coefficients are stored in the following order:
* [\c x, \c y, \c z, \c w]
*/
EIGEN_DEVICE_FUNC inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}
/** Constructs and initialize a quaternion from the array data */
EIGEN_DEVICE_FUNC explicit inline Quaternion(const Scalar* data) : m_coeffs(data) {}
/** Copy constructor */
template<class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }
/** Constructs and initializes a quaternion from the angle-axis \a aa */
EIGEN_DEVICE_FUNC explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
/** Constructs and initializes a quaternion from either:
* - a rotation matrix expression,
* - a 4D vector expression representing quaternion coefficients.
*/
template<typename Derived>
EIGEN_DEVICE_FUNC explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
/** Explicit copy constructor with scalar conversion */
template<typename OtherScalar, int OtherOptions>
EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
{ m_coeffs = other.coeffs().template cast<Scalar>(); }
EIGEN_DEVICE_FUNC static Quaternion UnitRandom();
template<typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs;}
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(NeedsAlignment))
#ifdef EIGEN_QUATERNION_PLUGIN
# include EIGEN_QUATERNION_PLUGIN
#endif
protected:
Coefficients m_coeffs;
#ifndef EIGEN_PARSED_BY_DOXYGEN
static EIGEN_STRONG_INLINE void _check_template_params()
{
EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,
INVALID_MATRIX_TEMPLATE_PARAMETERS)
}
#endif
};
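// Usage sketch (added for illustration; assumes <cmath>'s M_PI is available):
// common ways to obtain a unit quaternion and apply it.
//   Eigen::Quaterniond q1(Eigen::AngleAxisd(M_PI/2, Eigen::Vector3d::UnitZ()));
//   Eigen::Quaterniond q2 = Eigen::Quaterniond::FromTwoVectors(
//       Eigen::Vector3d::UnitX(), Eigen::Vector3d::UnitY()); // maps X onto Y
//   Eigen::Vector3d r = q1 * Eigen::Vector3d::UnitX();       // approximately (0, 1, 0)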
/** \ingroup Geometry_Module
* single precision quaternion type */
typedef Quaternion<float> Quaternionf;
/** \ingroup Geometry_Module
* double precision quaternion type */
typedef Quaternion<double> Quaterniond;
/***************************************************************************
* Specialization of Map<Quaternion<Scalar>>
***************************************************************************/
namespace internal {
template<typename _Scalar, int _Options>
struct traits<Map<Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
{
typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
};
}
namespace internal {
template<typename _Scalar, int _Options>
struct traits<Map<const Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
{
typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
typedef traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > TraitsBase;
enum {
Flags = TraitsBase::Flags & ~LvalueBit
};
};
}
/** \ingroup Geometry_Module
* \brief Quaternion expression mapping a constant memory buffer
*
* \tparam _Scalar the type of the Quaternion coefficients
* \tparam _Options see class Map
*
 * This is a specialization of class Map for Quaternion. It allows viewing
 * a 4-scalar memory buffer as an Eigen Quaternion object.
*
* \sa class Map, class Quaternion, class QuaternionBase
*/
template<typename _Scalar, int _Options>
class Map<const Quaternion<_Scalar>, _Options >
: public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >
{
public:
typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
* The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;}
protected:
const Coefficients m_coeffs;
};
/** \ingroup Geometry_Module
* \brief Expression of a quaternion from a memory buffer
*
* \tparam _Scalar the type of the Quaternion coefficients
* \tparam _Options see class Map
*
 * This is a specialization of class Map for Quaternion. It allows viewing
 * a 4-scalar memory buffer as an Eigen Quaternion object.
*
* \sa class Map, class Quaternion, class QuaternionBase
*/
template<typename _Scalar, int _Options>
class Map<Quaternion<_Scalar>, _Options >
: public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >
{
public:
typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
* The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}
EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
protected:
Coefficients m_coeffs;
};
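// Usage sketch (added for illustration; the buffer is hypothetical): viewing
// four raw scalars as a quaternion without copying; note the x, y, z, w
// storage order.
//   double buf[4] = {0.0, 0.0, 0.0, 1.0}; // identity, stored as x,y,z,w
//   Eigen::QuaternionMapd q(buf);
//   q.normalize(); // writes through to buf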
/** \ingroup Geometry_Module
* Map an unaligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, 0> QuaternionMapf;
/** \ingroup Geometry_Module
* Map an unaligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, 0> QuaternionMapd;
/** \ingroup Geometry_Module
* Map a 16-byte aligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf;
/** \ingroup Geometry_Module
* Map a 16-byte aligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;
/***************************************************************************
* Implementation of QuaternionBase methods
***************************************************************************/
// Generic Quaternion * Quaternion product
// This product can be specialized for a given architecture via the Arch template argument.
namespace internal {
template<int Arch, class Derived1, class Derived2, typename Scalar> struct quat_product
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
return Quaternion<Scalar>
(
a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
);
}
};
}
/** \returns the concatenation of two rotations as a quaternion-quaternion product */
template <class Derived>
template <class OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>
QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
return internal::quat_product<Architecture::Target, Derived, OtherDerived,
typename internal::traits<Derived>::Scalar>::run(*this, other);
}
/** \sa operator*(Quaternion) */
template <class Derived>
template <class OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
{
derived() = derived() * other.derived();
return derived();
}
/** Rotation of a vector by a quaternion.
* \remarks If the quaternion is used to rotate several points (>1)
* then it is much more efficient to first convert it to a 3x3 Matrix.
* Comparison of the operation cost for n transformations:
 * - Quaternion: 30n
* - Via a Matrix3: 24 + 15n
*/
template <class Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
QuaternionBase<Derived>::_transformVector(const Vector3& v) const
{
// Note that this algorithm comes from the optimization by hand
// of the conversion to a Matrix followed by a Matrix/Vector product.
// It appears to be much faster than the common algorithm found
// in the literature (30 versus 39 flops). It also requires two
// Vector3 as temporaries.
Vector3 uv = this->vec().cross(v);
uv += uv;
return v + this->w() * uv + this->vec().cross(uv);
}
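/* Usage sketch (illustrative): following the cost note above, convert to a
 * rotation matrix once when many points have to be transformed.
 * \code
 * Eigen::Quaternionf q = Eigen::Quaternionf::UnitRandom();
 * Eigen::Matrix3f R = q.toRotationMatrix(); // pay the conversion once
 * Eigen::Matrix<float,3,Eigen::Dynamic> pts = Eigen::Matrix<float,3,Eigen::Dynamic>::Random(3, 1000);
 * pts = R * pts; // 15 flops per point instead of 30 via q * point
 * \endcode
 */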
template<class Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)
{
coeffs() = other.coeffs();
return derived();
}
template<class Derived>
template<class OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)
{
coeffs() = other.coeffs();
return derived();
}
/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
*/
template<class Derived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
{
EIGEN_USING_STD_MATH(cos)
EIGEN_USING_STD_MATH(sin)
Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
this->w() = cos(ha);
this->vec() = sin(ha) * aa.axis();
return derived();
}
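/* Usage sketch (illustrative): building a quaternion from an angle-axis pair.
 * \code
 * Eigen::Quaterniond q;
 * q = Eigen::AngleAxisd(EIGEN_PI/4, Eigen::Vector3d::UnitZ()); // 45 deg about z
 * // now q.w() == cos(pi/8) and q.vec() == sin(pi/8) * UnitZ()
 * \endcode
 */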
/** Set \c *this from the expression \a xpr:
* - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
 * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
* and \a xpr is converted to a quaternion
*/
template<class Derived>
template<class MatrixDerived>
EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)
{
EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());
return derived();
}
/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to
* be normalized, otherwise the result is undefined.
*/
template<class Derived>
EIGEN_DEVICE_FUNC inline typename QuaternionBase<Derived>::Matrix3
QuaternionBase<Derived>::toRotationMatrix(void) const
{
// NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
// if not inlined then the cost of the return by value is huge ~ +35%,
// however, not inlining this function is an order of magnitude slower, so
// it has to be inlined, and so the return by value is not an issue
Matrix3 res;
const Scalar tx = Scalar(2)*this->x();
const Scalar ty = Scalar(2)*this->y();
const Scalar tz = Scalar(2)*this->z();
const Scalar twx = tx*this->w();
const Scalar twy = ty*this->w();
const Scalar twz = tz*this->w();
const Scalar txx = tx*this->x();
const Scalar txy = ty*this->x();
const Scalar txz = tz*this->x();
const Scalar tyy = ty*this->y();
const Scalar tyz = tz*this->y();
const Scalar tzz = tz*this->z();
res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
res.coeffRef(0,1) = txy-twz;
res.coeffRef(0,2) = txz+twy;
res.coeffRef(1,0) = txy+twz;
res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
res.coeffRef(1,2) = tyz-twx;
res.coeffRef(2,0) = txz-twy;
res.coeffRef(2,1) = tyz+twx;
res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
return res;
}
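/* Usage sketch (illustrative): normalize before converting, as required by the
 * precondition above.
 * \code
 * Eigen::Quaterniond q(2.0, 0.0, 0.0, 0.0); // non-unit quaternion (w = 2)
 * Eigen::Matrix3d R = q.normalized().toRotationMatrix(); // the identity here
 * \endcode
 */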
/** Sets \c *this to be a quaternion representing a rotation between
* the two arbitrary vectors \a a and \a b. In other words, the built
 * rotation represents a rotation sending the line of direction \a a
* to the line of direction \a b, both lines passing through the origin.
*
* \returns a reference to \c *this.
*
* Note that the two input vectors do \b not have to be normalized, and
* do not need to have the same norm.
*/
template<class Derived>
template<typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
{
EIGEN_USING_STD_MATH(sqrt)
Vector3 v0 = a.normalized();
Vector3 v1 = b.normalized();
Scalar c = v1.dot(v0);
// if dot == -1, vectors are nearly opposites
// => accurately compute the rotation axis by computing the
// intersection of the two planes. This is done by solving:
// x^T v0 = 0
// x^T v1 = 0
// under the constraint:
// ||x|| = 1
// which yields a singular value problem
if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
c = numext::maxi(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);
Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
this->w() = sqrt(w2);
this->vec() = axis * sqrt(Scalar(1) - w2);
return derived();
}
Vector3 axis = v0.cross(v1);
Scalar s = sqrt((Scalar(1)+c)*Scalar(2));
Scalar invs = Scalar(1)/s;
this->vec() = axis * invs;
this->w() = s * Scalar(0.5);
return derived();
}
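/* Usage sketch (illustrative): the inputs need neither be normalized nor have
 * equal norms.
 * \code
 * Eigen::Vector3d a(0, 0, 3), b(2, 0, 0);
 * Eigen::Quaterniond q;
 * q.setFromTwoVectors(a, b);
 * // q * a.normalized() is approximately b.normalized()
 * \endcode
 */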
/** \returns a random unit quaternion following a uniform distribution law on SO(3)
*
* \note The implementation is based on http://planning.cs.uiuc.edu/node198.html
*/
template<typename Scalar, int Options>
EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::UnitRandom()
{
EIGEN_USING_STD_MATH(sqrt)
EIGEN_USING_STD_MATH(sin)
EIGEN_USING_STD_MATH(cos)
const Scalar u1 = internal::random<Scalar>(0, 1),
u2 = internal::random<Scalar>(0, 2*EIGEN_PI),
u3 = internal::random<Scalar>(0, 2*EIGEN_PI);
const Scalar a = sqrt(1 - u1),
b = sqrt(u1);
return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3));
}
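/* Usage sketch (illustrative): drawing uniformly distributed orientations.
 * \code
 * Eigen::Quaternionf q = Eigen::Quaternionf::UnitRandom();
 * // q.norm() == 1 up to rounding, no extra normalization needed
 * \endcode
 */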
/** Returns a quaternion representing a rotation between
* the two arbitrary vectors \a a and \a b. In other words, the built
 * rotation represents a rotation sending the line of direction \a a
* to the line of direction \a b, both lines passing through the origin.
*
* \returns resulting quaternion
*
* Note that the two input vectors do \b not have to be normalized, and
* do not need to have the same norm.
*/
template<typename Scalar, int Options>
template<typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
{
Quaternion quat;
quat.setFromTwoVectors(a, b);
return quat;
}
/** \returns the multiplicative inverse of \c *this
* Note that in most cases, i.e., if you simply want the opposite rotation,
* and/or the quaternion is normalized, then it is enough to use the conjugate.
*
* \sa QuaternionBase::conjugate()
*/
template <class Derived>
EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const
{
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
Scalar n2 = this->squaredNorm();
if (n2 > Scalar(0))
return Quaternion<Scalar>(conjugate().coeffs() / n2);
else
{
// return an invalid result to flag the error
return Quaternion<Scalar>(Coefficients::Zero());
}
}
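/* Usage sketch (illustrative): for unit quaternions, conjugate() is the
 * cheaper equivalent of inverse(), as noted above.
 * \code
 * Eigen::Quaterniond q = Eigen::Quaterniond::UnitRandom(); // already normalized
 * Eigen::Quaterniond qi = q.conjugate(); // same as q.inverse() here
 * // q * qi is approximately the identity quaternion (0, 0, 0, w = 1)
 * \endcode
 */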
// Generic conjugate of a Quaternion
namespace internal {
template<int Arch, class Derived, typename Scalar> struct quat_conj
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived>& q){
return Quaternion<Scalar>(q.w(),-q.x(),-q.y(),-q.z());
}
};
}
/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
* if the quaternion is normalized.
* The conjugate of a quaternion represents the opposite rotation.
*
  * \sa Quaternion::inverse()
*/
template <class Derived>
EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar>
QuaternionBase<Derived>::conjugate() const
{
return internal::quat_conj<Architecture::Target, Derived,
typename internal::traits<Derived>::Scalar>::run(*this);
}
/** \returns the angle (in radians) between two rotations
* \sa dot()
*/
template <class Derived>
template <class OtherDerived>
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
EIGEN_USING_STD_MATH(atan2)
Quaternion<Scalar> d = (*this) * other.conjugate();
return Scalar(2) * atan2( d.vec().norm(), numext::abs(d.w()) );
}
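/* Usage sketch (illustrative): angularDistance() recovers the rotation angle
 * separating two orientations.
 * \code
 * Eigen::Quaterniond q1 = Eigen::Quaterniond::UnitRandom();
 * Eigen::Quaterniond q2 = q1 * Eigen::Quaterniond(Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitX()));
 * double d = q1.angularDistance(q2); // approximately 0.3
 * \endcode
 */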
/** \returns the spherical linear interpolation between the two quaternions
* \c *this and \a other at the parameter \a t in [0;1].
*
* This represents an interpolation for a constant motion between \c *this and \a other,
* see also http://en.wikipedia.org/wiki/Slerp.
*/
template <class Derived>
template <class OtherDerived>
EIGEN_DEVICE_FUNC Quaternion<typename internal::traits<Derived>::Scalar>
QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
{
EIGEN_USING_STD_MATH(acos)
EIGEN_USING_STD_MATH(sin)
const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
Scalar d = this->dot(other);
Scalar absD = numext::abs(d);
Scalar scale0;
Scalar scale1;
if(absD>=one)
{
scale0 = Scalar(1) - t;
scale1 = t;
}
else
{
// theta is the angle between the 2 quaternions
Scalar theta = acos(absD);
Scalar sinTheta = sin(theta);
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
scale1 = sin( ( t * theta) ) / sinTheta;
}
if(d<Scalar(0)) scale1 = -scale1;
return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}
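/* Usage sketch (illustrative): constant-speed interpolation between two
 * orientations; t = 0 yields *this and t = 1 yields other (up to sign).
 * \code
 * Eigen::Quaternionf qa = Eigen::Quaternionf::UnitRandom();
 * Eigen::Quaternionf qb = Eigen::Quaternionf::UnitRandom();
 * Eigen::Quaternionf qmid = qa.slerp(0.5f, qb); // halfway rotation
 * \endcode
 */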
namespace internal {
// set from a rotation matrix
template<typename Other>
struct quaternionbase_assign_impl<Other,3,3>
{
typedef typename Other::Scalar Scalar;
template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& a_mat)
{
const typename internal::nested_eval<Other,2>::type mat(a_mat);
EIGEN_USING_STD_MATH(sqrt)
// This algorithm comes from "Quaternion Calculus and Fast Animation",
// Ken Shoemake, 1987 SIGGRAPH course notes
Scalar t = mat.trace();
if (t > Scalar(0))
{
t = sqrt(t + Scalar(1.0));
q.w() = Scalar(0.5)*t;
t = Scalar(0.5)/t;
q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
}
else
{
Index i = 0;
if (mat.coeff(1,1) > mat.coeff(0,0))
i = 1;
if (mat.coeff(2,2) > mat.coeff(i,i))
i = 2;
Index j = (i+1)%3;
Index k = (j+1)%3;
t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
q.coeffs().coeffRef(i) = Scalar(0.5) * t;
t = Scalar(0.5)/t;
q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
}
}
};
// set from a vector of coefficients assumed to be a quaternion
template<typename Other>
struct quaternionbase_assign_impl<Other,4,1>
{
typedef typename Other::Scalar Scalar;
template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& vec)
{
q.coeffs() = vec;
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_QUATERNION_H
| 31,688 | 38.122222 | 156 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Rotation2D.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ROTATION2D_H
#define EIGEN_ROTATION2D_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class Rotation2D
*
* \brief Represents a rotation/orientation in a 2 dimensional space.
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
*
  * This class is equivalent to a single scalar representing a counter-clockwise rotation
  * as a single angle in radians. It provides some additional features such as the automatic
* conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
* interface to Quaternion in order to facilitate the writing of generic algorithms
* dealing with rotations.
*
* \sa class Quaternion, class Transform
*/
namespace internal {
template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
{
typedef _Scalar Scalar;
};
} // end namespace internal
template<typename _Scalar>
class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
{
typedef RotationBase<Rotation2D<_Scalar>,2> Base;
public:
using Base::operator*;
enum { Dim = 2 };
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
typedef Matrix<Scalar,2,1> Vector2;
typedef Matrix<Scalar,2,2> Matrix2;
protected:
Scalar m_angle;
public:
  /** Construct a 2D counter-clockwise rotation from the angle \a a in radians. */
EIGEN_DEVICE_FUNC explicit inline Rotation2D(const Scalar& a) : m_angle(a) {}
  /** Default constructor without initialization. The represented rotation is undefined. */
EIGEN_DEVICE_FUNC Rotation2D() {}
/** Construct a 2D rotation from a 2x2 rotation matrix \a mat.
*
* \sa fromRotationMatrix()
*/
template<typename Derived>
EIGEN_DEVICE_FUNC explicit Rotation2D(const MatrixBase<Derived>& m)
{
fromRotationMatrix(m.derived());
}
/** \returns the rotation angle */
EIGEN_DEVICE_FUNC inline Scalar angle() const { return m_angle; }
/** \returns a read-write reference to the rotation angle */
EIGEN_DEVICE_FUNC inline Scalar& angle() { return m_angle; }
/** \returns the rotation angle in [0,2pi] */
EIGEN_DEVICE_FUNC inline Scalar smallestPositiveAngle() const {
Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
return tmp<Scalar(0) ? tmp + Scalar(2*EIGEN_PI) : tmp;
}
/** \returns the rotation angle in [-pi,pi] */
EIGEN_DEVICE_FUNC inline Scalar smallestAngle() const {
Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));
if(tmp>Scalar(EIGEN_PI)) tmp -= Scalar(2*EIGEN_PI);
else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2*EIGEN_PI);
return tmp;
}
/** \returns the inverse rotation */
EIGEN_DEVICE_FUNC inline Rotation2D inverse() const { return Rotation2D(-m_angle); }
/** Concatenates two rotations */
EIGEN_DEVICE_FUNC inline Rotation2D operator*(const Rotation2D& other) const
{ return Rotation2D(m_angle + other.m_angle); }
/** Concatenates two rotations */
EIGEN_DEVICE_FUNC inline Rotation2D& operator*=(const Rotation2D& other)
{ m_angle += other.m_angle; return *this; }
/** Applies the rotation to a 2D vector */
EIGEN_DEVICE_FUNC Vector2 operator* (const Vector2& vec) const
{ return toRotationMatrix() * vec; }
template<typename Derived>
EIGEN_DEVICE_FUNC Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
EIGEN_DEVICE_FUNC Matrix2 toRotationMatrix() const;
/** Set \c *this from a 2x2 rotation matrix \a mat.
  * In other words, this function extracts the rotation angle from the rotation matrix.
*
* This method is an alias for fromRotationMatrix()
*
* \sa fromRotationMatrix()
*/
template<typename Derived>
EIGEN_DEVICE_FUNC Rotation2D& operator=(const MatrixBase<Derived>& m)
{ return fromRotationMatrix(m.derived()); }
/** \returns the spherical interpolation between \c *this and \a other using
* parameter \a t. It is in fact equivalent to a linear interpolation.
*/
EIGEN_DEVICE_FUNC inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
{
Scalar dist = Rotation2D(other.m_angle-m_angle).smallestAngle();
return Rotation2D(m_angle + dist*t);
}
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
{ return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
EIGEN_DEVICE_FUNC inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
{
m_angle = Scalar(other.angle());
}
EIGEN_DEVICE_FUNC static inline Rotation2D Identity() { return Rotation2D(0); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return internal::isApprox(m_angle,other.m_angle, prec); }
};
/** \ingroup Geometry_Module
* single precision 2D rotation type */
typedef Rotation2D<float> Rotation2Df;
/** \ingroup Geometry_Module
* double precision 2D rotation type */
typedef Rotation2D<double> Rotation2Dd;
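/* Usage sketch (illustrative): composing planar rotations and wrapping angles
 * with the helpers above.
 * \code
 * Eigen::Rotation2Df r1(3.0f), r2(1.0f);
 * Eigen::Rotation2Df r = r1 * r2; // angle() == 4.0
 * float a = r.smallestAngle(); // wrapped into [-pi, pi]
 * Eigen::Vector2f v = r * Eigen::Vector2f::UnitX();
 * \endcode
 */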
/** Set \c *this from a 2x2 rotation matrix \a mat.
  * In other words, this function extracts the rotation angle
* from the rotation matrix.
*/
template<typename Scalar>
template<typename Derived>
EIGEN_DEVICE_FUNC Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
{
EIGEN_USING_STD_MATH(atan2)
EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
return *this;
}
/** Constructs and \returns an equivalent 2x2 rotation matrix.
*/
template<typename Scalar>
typename Rotation2D<Scalar>::Matrix2
EIGEN_DEVICE_FUNC Rotation2D<Scalar>::toRotationMatrix(void) const
{
EIGEN_USING_STD_MATH(sin)
EIGEN_USING_STD_MATH(cos)
Scalar sinA = sin(m_angle);
Scalar cosA = cos(m_angle);
return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
}
} // end namespace Eigen
#endif // EIGEN_ROTATION2D_H
| 6,877 | 33.39 | 149 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/RotationBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ROTATIONBASE_H
#define EIGEN_ROTATIONBASE_H
namespace Eigen {
// forward declaration
namespace internal {
template<typename RotationDerived, typename MatrixType, bool IsVector=MatrixType::IsVectorAtCompileTime>
struct rotation_base_generic_product_selector;
}
/** \class RotationBase
*
* \brief Common base class for compact rotation representations
*
* \tparam Derived is the derived type, i.e., a rotation type
* \tparam _Dim the dimension of the space
*/
template<typename Derived, int _Dim>
class RotationBase
{
public:
enum { Dim = _Dim };
/** the scalar type of the coefficients */
typedef typename internal::traits<Derived>::Scalar Scalar;
/** corresponding linear transformation matrix type */
typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
typedef Matrix<Scalar,Dim,1> VectorType;
public:
EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }
/** \returns an equivalent rotation matrix */
EIGEN_DEVICE_FUNC inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
/** \returns an equivalent rotation matrix
* This function is added to be conform with the Transform class' naming scheme.
*/
EIGEN_DEVICE_FUNC inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); }
/** \returns the inverse rotation */
EIGEN_DEVICE_FUNC inline Derived inverse() const { return derived().inverse(); }
/** \returns the concatenation of the rotation \c *this with a translation \a t */
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t) const
{ return Transform<Scalar,Dim,Isometry>(*this) * t; }
/** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */
EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const UniformScaling<Scalar>& s) const
{ return toRotationMatrix() * s.factor(); }
/** \returns the concatenation of the rotation \c *this with a generic expression \a e
* \a e can be:
* - a DimxDim linear transformation matrix
* - a DimxDim diagonal matrix (axis aligned scaling)
* - a vector of size Dim
*/
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType
operator*(const EigenBase<OtherDerived>& e) const
{ return internal::rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); }
/** \returns the concatenation of a linear transformation \a l with the rotation \a r */
template<typename OtherDerived> friend
EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r)
{ return l.derived() * r.toRotationMatrix(); }
/** \returns the concatenation of a scaling \a l with the rotation \a r */
EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar,Dim>& l, const Derived& r)
{
Transform<Scalar,Dim,Affine> res(r);
res.linear().applyOnTheLeft(l);
return res;
}
/** \returns the concatenation of the rotation \c *this with a transformation \a t */
template<int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Options>& t) const
{ return toRotationMatrix() * t; }
template<typename OtherVectorType>
EIGEN_DEVICE_FUNC inline VectorType _transformVector(const OtherVectorType& v) const
{ return toRotationMatrix() * v; }
};
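/* Usage sketch (illustrative): through these generic operators any rotation
 * type combines with vectors, matrices, scalings and translations.
 * \code
 * Eigen::AngleAxisd aa(0.5, Eigen::Vector3d::UnitZ());
 * Eigen::Vector3d v = aa * Eigen::Vector3d::UnitX(); // rotation * vector
 * Eigen::Matrix3d M = aa * Eigen::Matrix3d::Identity(); // rotation * matrix
 * Eigen::Transform<double,3,Eigen::Isometry> T = aa * Eigen::Translation3d(1, 0, 0);
 * \endcode
 */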
namespace internal {
// implementation of the generic product rotation * matrix
template<typename RotationDerived, typename MatrixType>
struct rotation_base_generic_product_selector<RotationDerived,MatrixType,false>
{
enum { Dim = RotationDerived::Dim };
typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;
EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const MatrixType& m)
{ return r.toRotationMatrix() * m; }
};
template<typename RotationDerived, typename Scalar, int Dim, int MaxDim>
struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix<Scalar,Dim,MaxDim>, false >
{
typedef Transform<Scalar,Dim,Affine> ReturnType;
EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix<Scalar,Dim,MaxDim>& m)
{
ReturnType res(r);
res.linear() *= m;
return res;
}
};
template<typename RotationDerived,typename OtherVectorType>
struct rotation_base_generic_product_selector<RotationDerived,OtherVectorType,true>
{
enum { Dim = RotationDerived::Dim };
typedef Matrix<typename RotationDerived::Scalar,Dim,1> ReturnType;
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v)
{
return r._transformVector(v);
}
};
} // end namespace internal
/** \geometry_module
*
* \brief Constructs a Dim x Dim rotation matrix from the rotation \a r
*/
template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
{
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
*this = r.toRotationMatrix();
}
/** \geometry_module
*
* \brief Set a Dim x Dim rotation matrix from the rotation \a r
*/
template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
{
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
return *this = r.toRotationMatrix();
}
namespace internal {
/** \internal
*
 * Helper function to convert an arbitrary rotation object to a rotation matrix.
*
* \tparam Scalar the numeric type of the matrix coefficients
* \tparam Dim the dimension of the current space
*
* It returns a Dim x Dim fixed size matrix.
*
* Default specializations are provided for:
* - any scalar type (2D),
* - any matrix expression,
* - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
*
* Currently toRotationMatrix is only used by Transform.
*
* \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
*/
template<typename Scalar, int Dim>
EIGEN_DEVICE_FUNC static inline Matrix<Scalar,2,2> toRotationMatrix(const Scalar& s)
{
EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
return Rotation2D<Scalar>(s).toRotationMatrix();
}
template<typename Scalar, int Dim, typename OtherDerived>
EIGEN_DEVICE_FUNC static inline Matrix<Scalar,Dim,Dim> toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
{
return r.toRotationMatrix();
}
template<typename Scalar, int Dim, typename OtherDerived>
EIGEN_DEVICE_FUNC static inline const MatrixBase<OtherDerived>& toRotationMatrix(const MatrixBase<OtherDerived>& mat)
{
EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
YOU_MADE_A_PROGRAMMING_MISTAKE)
return mat;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_ROTATIONBASE_H
| 8,063 | 37.956522 | 169 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Scaling.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SCALING_H
#define EIGEN_SCALING_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class Scaling
*
* \brief Represents a generic uniform scaling transformation
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients.
*
  * This class represents a uniform scaling transformation. It is the return
* type of Scaling(Scalar), and most of the time this is the only way it
* is used. In particular, this class is not aimed to be used to store a scaling transformation,
  * but rather to ease the construction and update of Transform objects.
*
* To represent an axis aligned scaling, use the DiagonalMatrix class.
*
* \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
*/
template<typename _Scalar>
class UniformScaling
{
public:
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
protected:
Scalar m_factor;
public:
/** Default constructor without initialization. */
UniformScaling() {}
/** Constructs and initialize a uniform scaling transformation */
explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}
inline const Scalar& factor() const { return m_factor; }
inline Scalar& factor() { return m_factor; }
  /** Concatenates two uniform scalings */
inline UniformScaling operator* (const UniformScaling& other) const
{ return UniformScaling(m_factor * other.factor()); }
/** Concatenates a uniform scaling and a translation */
template<int Dim>
inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
/** Concatenates a uniform scaling and an affine transformation */
template<int Dim, int Mode, int Options>
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
{
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
res.prescale(factor());
return res;
}
/** Concatenates a uniform scaling and a linear transformation matrix */
// TODO returns an expression
template<typename Derived>
inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
{ return other * m_factor; }
template<typename Derived,int Dim>
inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
{ return r.toRotationMatrix() * m_factor; }
/** \returns the inverse scaling */
inline UniformScaling inverse() const
{ return UniformScaling(Scalar(1)/m_factor); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
inline UniformScaling<NewScalarType> cast() const
{ return UniformScaling<NewScalarType>(NewScalarType(m_factor)); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)
{ m_factor = Scalar(other.factor()); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return internal::isApprox(m_factor, other.factor(), prec); }
};
/** \addtogroup Geometry_Module */
//@{
/** Concatenates a linear transformation matrix and a uniform scaling
* \relates UniformScaling
*/
// NOTE this operator is defined in MatrixBase and not as a friend function
// of UniformScaling to fix an internal crash of Intel's ICC
template<typename Derived,typename Scalar>
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)
operator*(const MatrixBase<Derived>& matrix, const UniformScaling<Scalar>& s)
{ return matrix.derived() * s.factor(); }
/** Constructs a uniform scaling from scale factor \a s */
inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
/** Constructs a uniform scaling from scale factor \a s */
inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
/** Constructs a uniform scaling from scale factor \a s */
template<typename RealScalar>
inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
{ return UniformScaling<std::complex<RealScalar> >(s); }
/** Constructs a 2D axis aligned scaling */
template<typename Scalar>
inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
{ return DiagonalMatrix<Scalar,2>(sx, sy); }
/** Constructs a 3D axis aligned scaling */
template<typename Scalar>
inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
/** Constructs an axis aligned scaling expression from vector expression \a coeffs
* This is an alias for coeffs.asDiagonal()
*/
template<typename Derived>
inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
{ return coeffs.asDiagonal(); }
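/* Usage sketch (illustrative): the Scaling() helpers cover both the uniform
 * and the axis aligned cases.
 * \code
 * Eigen::Affine3d T = Eigen::Translation3d(1, 0, 0) * Eigen::Scaling(2.0);
 * Eigen::DiagonalMatrix<double,3> S = Eigen::Scaling(1.0, 2.0, 3.0);
 * Eigen::Vector3d v = S * Eigen::Vector3d::Ones(); // (1, 2, 3)
 * \endcode
 */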
/** \deprecated */
typedef DiagonalMatrix<float, 2> AlignedScaling2f;
/** \deprecated */
typedef DiagonalMatrix<double,2> AlignedScaling2d;
/** \deprecated */
typedef DiagonalMatrix<float, 3> AlignedScaling3f;
/** \deprecated */
typedef DiagonalMatrix<double,3> AlignedScaling3d;
//@}
template<typename Scalar>
template<int Dim>
inline Transform<Scalar,Dim,Affine>
UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
{
Transform<Scalar,Dim,Affine> res;
res.matrix().setZero();
res.linear().diagonal().fill(factor());
res.translation() = factor() * t.vector();
res(Dim,Dim) = Scalar(1);
return res;
}
} // end namespace Eigen
#endif // EIGEN_SCALING_H
| 6,324 | 35.988304 | 135 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Transform.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Benoit Jacob <[email protected]>
// Copyright (C) 2010 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRANSFORM_H
#define EIGEN_TRANSFORM_H
namespace Eigen {
namespace internal {
template<typename Transform>
struct transform_traits
{
enum
{
Dim = Transform::Dim,
HDim = Transform::HDim,
Mode = Transform::Mode,
IsProjective = (int(Mode)==int(Projective))
};
};
template< typename TransformType,
typename MatrixType,
int Case = transform_traits<TransformType>::IsProjective ? 0
: int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
: 2,
int RhsCols = MatrixType::ColsAtCompileTime>
struct transform_right_product_impl;
template< typename Other,
int Mode,
int Options,
int Dim,
int HDim,
int OtherRows=Other::RowsAtCompileTime,
int OtherCols=Other::ColsAtCompileTime>
struct transform_left_product_impl;
template< typename Lhs,
typename Rhs,
bool AnyProjective =
transform_traits<Lhs>::IsProjective ||
transform_traits<Rhs>::IsProjective>
struct transform_transform_product_impl;
template< typename Other,
int Mode,
int Options,
int Dim,
int HDim,
int OtherRows=Other::RowsAtCompileTime,
int OtherCols=Other::ColsAtCompileTime>
struct transform_construct_from_matrix;
template<typename TransformType> struct transform_take_affine_part;
template<typename _Scalar, int _Dim, int _Mode, int _Options>
struct traits<Transform<_Scalar,_Dim,_Mode,_Options> >
{
typedef _Scalar Scalar;
typedef Eigen::Index StorageIndex;
typedef Dense StorageKind;
enum {
Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1,
RowsAtCompileTime = _Mode==Projective ? Dim1 : _Dim,
ColsAtCompileTime = Dim1,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
Flags = 0
};
};
template<int Mode> struct transform_make_affine;
} // end namespace internal
/** \geometry_module \ingroup Geometry_Module
*
* \class Transform
*
  * \brief Represents an homogeneous transformation in an N-dimensional space
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _Dim the dimension of the space
* \tparam _Mode the type of the transformation. Can be:
* - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
* where the last row is assumed to be [0 ... 0 1].
* - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
* - #Projective: the transformation is stored as a (Dim+1)^2 matrix
* without any assumption.
  * \tparam _Options has the same meaning as in class Matrix. It allows one to specify DontAlign and/or RowMajor.
* These Options are passed directly to the underlying matrix type.
*
* The homography is internally represented and stored by a matrix which
* is available through the matrix() method. To understand the behavior of
  * this class you have to think of a Transform object as its internal
* matrix representation. The chosen convention is right multiply:
*
* \code v' = T * v \endcode
*
* Therefore, an affine transformation matrix M is shaped like this:
*
* \f$ \left( \begin{array}{cc}
* linear & translation\\
* 0 ... 0 & 1
* \end{array} \right) \f$
*
* Note that for a projective transformation the last row can be anything,
  * and then the interpretation of different parts might be slightly different.
*
* However, unlike a plain matrix, the Transform class provides many features
* simplifying both its assembly and usage. In particular, it can be composed
* with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix)
* and can be directly used to transform implicit homogeneous vectors. All these
* operations are handled via the operator*. For the composition of transformations,
  * the principle is first to convert the right/left hand sides of the product
* to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
* Of course, internally, operator* tries to perform the minimal number of operations
  * according to the nature of each term. Likewise, when applying the transform
  * to points, the latter are automatically promoted to homogeneous vectors
  * before doing the matrix product. The conversions to homogeneous representations
  * are performed as follows:
*
* \b Translation t (Dim)x(1):
* \f$ \left( \begin{array}{cc}
* I & t \\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*
* \b Rotation R (Dim)x(Dim):
* \f$ \left( \begin{array}{cc}
* R & 0\\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*<!--
* \b Linear \b Matrix L (Dim)x(Dim):
* \f$ \left( \begin{array}{cc}
* L & 0\\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*
* \b Affine \b Matrix A (Dim)x(Dim+1):
* \f$ \left( \begin{array}{c}
* A\\
* 0\,...\,0\,1
* \end{array} \right) \f$
*-->
* \b Scaling \b DiagonalMatrix S (Dim)x(Dim):
* \f$ \left( \begin{array}{cc}
* S & 0\\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*
* \b Column \b point v (Dim)x(1):
* \f$ \left( \begin{array}{c}
* v\\
* 1
* \end{array} \right) \f$
*
* \b Set \b of \b column \b points V1...Vn (Dim)x(n):
* \f$ \left( \begin{array}{ccc}
* v_1 & ... & v_n\\
* 1 & ... & 1
* \end{array} \right) \f$
*
* The concatenation of a Transform object with any kind of other transformation
* always returns a Transform object.
*
* A little exception to the "as pure matrix product" rule is the case of the
* transformation of non homogeneous vectors by an affine transformation. In
* that case the last matrix row can be ignored, and the product returns non
* homogeneous vectors.
*
* Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
* it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
* The solution is either to use a Dim x Dynamic matrix or explicitly request a
* vector transformation by making the vector homogeneous:
* \code
* m' = T * m.colwise().homogeneous();
* \endcode
* Note that there is zero overhead.
*
* Conversion methods from/to Qt's QMatrix and QTransform are available if the
* preprocessor token EIGEN_QT_SUPPORT is defined.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
*
* \sa class Matrix, class Quaternion
*/
template<typename _Scalar, int _Dim, int _Mode, int _Options>
class Transform
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
enum {
Mode = _Mode,
Options = _Options,
Dim = _Dim, ///< space dimension in which the transformation holds
HDim = _Dim+1, ///< size of a respective homogeneous vector
Rows = int(Mode)==(AffineCompact) ? Dim : HDim
};
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
typedef Eigen::Index StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** type of the matrix used to represent the transformation */
typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
/** constified MatrixType */
typedef const MatrixType ConstMatrixType;
/** type of the matrix used to represent the linear part of the transformation */
typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
/** type of read reference to the linear part of the transformation */
typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
/** type of read/write reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
MatrixType&,
Block<MatrixType,Dim,HDim> >::type AffinePart;
/** type of read reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
const MatrixType&,
const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
/** type of a vector */
typedef Matrix<Scalar,Dim,1> VectorType;
/** type of a read/write reference to the translation part of the rotation */
typedef Block<MatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> TranslationPart;
/** type of a read reference to the translation part of the rotation */
typedef const Block<ConstMatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> ConstTranslationPart;
/** corresponding translation type */
typedef Translation<Scalar,Dim> TranslationType;
// this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
/** The return type of the product between a diagonal matrix and a transform */
typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;
protected:
MatrixType m_matrix;
public:
/** Default constructor without initialization of the meaningful coefficients.
* If Mode==Affine, then the last row is set to [0 ... 0 1] */
EIGEN_DEVICE_FUNC inline Transform()
{
check_template_params();
internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
}
EIGEN_DEVICE_FUNC inline Transform(const Transform& other)
{
check_template_params();
m_matrix = other.m_matrix;
}
EIGEN_DEVICE_FUNC inline explicit Transform(const TranslationType& t)
{
check_template_params();
*this = t;
}
EIGEN_DEVICE_FUNC inline explicit Transform(const UniformScaling<Scalar>& s)
{
check_template_params();
*this = s;
}
template<typename Derived>
EIGEN_DEVICE_FUNC inline explicit Transform(const RotationBase<Derived, Dim>& r)
{
check_template_params();
*this = r;
}
EIGEN_DEVICE_FUNC inline Transform& operator=(const Transform& other)
{ m_matrix = other.m_matrix; return *this; }
typedef internal::transform_take_affine_part<Transform> take_affine_part;
/** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline explicit Transform(const EigenBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
check_template_params();
internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
}
/** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline Transform& operator=(const EigenBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
return *this;
}
template<int OtherOptions>
EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
{
check_template_params();
// only the options change, we can directly copy the matrices
m_matrix = other.matrix();
}
template<int OtherMode,int OtherOptions>
EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)
{
check_template_params();
// prevent conversions as:
// Affine | AffineCompact | Isometry = Projective
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
// prevent conversions as:
// Isometry = Affine | AffineCompact
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
enum { ModeIsAffineCompact = Mode == int(AffineCompact),
OtherModeIsAffineCompact = OtherMode == int(AffineCompact)
};
if(ModeIsAffineCompact == OtherModeIsAffineCompact)
{
// We need the block expression because the code is compiled for all
// combinations of transformations and will trigger a compile time error
// if one tries to assign the matrices directly
m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);
makeAffine();
}
else if(OtherModeIsAffineCompact)
{
typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;
internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());
}
else
{
// here we know that Mode == AffineCompact and OtherMode != AffineCompact.
// if OtherMode were Projective, the static assert above would already have caught it.
// So the only possibility is that OtherMode == Affine
linear() = other.linear();
translation() = other.translation();
}
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform(const ReturnByValue<OtherDerived>& other)
{
check_template_params();
other.evalTo(*this);
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform& operator=(const ReturnByValue<OtherDerived>& other)
{
other.evalTo(*this);
return *this;
}
#ifdef EIGEN_QT_SUPPORT
inline Transform(const QMatrix& other);
inline Transform& operator=(const QMatrix& other);
inline QMatrix toQMatrix(void) const;
inline Transform(const QTransform& other);
inline Transform& operator=(const QTransform& other);
inline QTransform toQTransform(void) const;
#endif
EIGEN_DEVICE_FUNC Index rows() const { return int(Mode)==int(Projective) ? m_matrix.cols() : (m_matrix.cols()-1); }
EIGEN_DEVICE_FUNC Index cols() const { return m_matrix.cols(); }
/** shortcut for m_matrix(row,col);
* \sa MatrixBase::operator(Index,Index) const */
EIGEN_DEVICE_FUNC inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
/** shortcut for m_matrix(row,col);
* \sa MatrixBase::operator(Index,Index) */
EIGEN_DEVICE_FUNC inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
/** \returns a read-only expression of the transformation matrix */
EIGEN_DEVICE_FUNC inline const MatrixType& matrix() const { return m_matrix; }
/** \returns a writable expression of the transformation matrix */
EIGEN_DEVICE_FUNC inline MatrixType& matrix() { return m_matrix; }
/** \returns a read-only expression of the linear part of the transformation */
EIGEN_DEVICE_FUNC inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); }
/** \returns a writable expression of the linear part of the transformation */
EIGEN_DEVICE_FUNC inline LinearPart linear() { return LinearPart(m_matrix,0,0); }
/** \returns a read-only expression of the Dim x HDim affine part of the transformation */
EIGEN_DEVICE_FUNC inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }
/** \returns a writable expression of the Dim x HDim affine part of the transformation */
EIGEN_DEVICE_FUNC inline AffinePart affine() { return take_affine_part::run(m_matrix); }
/** \returns a read-only expression of the translation vector of the transformation */
EIGEN_DEVICE_FUNC inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); }
/** \returns a writable expression of the translation vector of the transformation */
EIGEN_DEVICE_FUNC inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }
/** \returns an expression of the product between the transform \c *this and a matrix expression \a other.
*
* The right-hand-side \a other can be either:
* \li an homogeneous vector of size Dim+1,
* \li a set of homogeneous vectors of size Dim+1 x N,
* \li a transformation matrix of size Dim+1 x Dim+1.
*
* Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be:
* \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode),
* \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode),
*
* In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other.
*
* If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type,
* or do your own cooking.
*
* Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only:
* \code
* Affine3f A;
* Vector3f v1, v2;
* v2 = A.linear() * v1;
* \endcode
*
*/
// note: this function is defined here because some compilers cannot find the respective declaration
template<typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
operator * (const EigenBase<OtherDerived> &other) const
{ return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
/** \returns the product expression of a transformation matrix \a a times a transform \a b
*
    * The left hand side \a a can be either:
* \li a linear transformation matrix of size Dim x Dim,
* \li an affine transformation matrix of size Dim x Dim+1,
* \li a general transformation matrix of size Dim+1 x Dim+1.
*/
template<typename OtherDerived> friend
EIGEN_DEVICE_FUNC inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType
operator * (const EigenBase<OtherDerived> &a, const Transform &b)
{ return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }
/** \returns The product expression of a transform \a a times a diagonal matrix \a b
*
* The rhs diagonal matrix is interpreted as an affine scaling transformation. The
    * product results in a Transform of the same type (mode) as the lhs only if the lhs
    * mode is not an isometry. In the isometry case, the returned transform is an affinity.
*/
template<typename DiagonalDerived>
EIGEN_DEVICE_FUNC inline const TransformTimeDiagonalReturnType
operator * (const DiagonalBase<DiagonalDerived> &b) const
{
TransformTimeDiagonalReturnType res(*this);
res.linearExt() *= b;
return res;
}
/** \returns The product expression of a diagonal matrix \a a times a transform \a b
*
* The lhs diagonal matrix is interpreted as an affine scaling transformation. The
    * product results in a Transform of the same type (mode) as the lhs only if the lhs
    * mode is not an isometry. In the isometry case, the returned transform is an affinity.
*/
template<typename DiagonalDerived>
EIGEN_DEVICE_FUNC friend inline TransformTimeDiagonalReturnType
operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)
{
TransformTimeDiagonalReturnType res;
res.linear().noalias() = a*b.linear();
res.translation().noalias() = a*b.translation();
if (Mode!=int(AffineCompact))
res.matrix().row(Dim) = b.matrix().row(Dim);
return res;
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
/** Concatenates two transformations */
EIGEN_DEVICE_FUNC inline const Transform operator * (const Transform& other) const
{
return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
}
#if EIGEN_COMP_ICC
private:
// this intermediate structure permits to workaround a bug in ICC 11:
// error: template instantiation resulted in unexpected function type of "Eigen::Transform<double, 3, 32, 0>
// (const Eigen::Transform<double, 3, 2, 0> &) const"
// (the meaning of a name may have changed since the template declaration -- the type of the template is:
// "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,
// Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const")
//
template<int OtherMode,int OtherOptions> struct icc_11_workaround
{
typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;
typedef typename ProductType::ResultType ResultType;
};
public:
/** Concatenates two different transformations */
template<int OtherMode,int OtherOptions>
inline typename icc_11_workaround<OtherMode,OtherOptions>::ResultType
operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
{
typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType;
return ProductType::run(*this,other);
}
#else
/** Concatenates two different transformations */
template<int OtherMode,int OtherOptions>
EIGEN_DEVICE_FUNC inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType
operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
{
return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);
}
#endif
/** \sa MatrixBase::setIdentity() */
EIGEN_DEVICE_FUNC void setIdentity() { m_matrix.setIdentity(); }
/**
* \brief Returns an identity transformation.
* \todo In the future this function should be returning a Transform expression.
*/
EIGEN_DEVICE_FUNC static const Transform Identity()
{
return Transform(MatrixType::Identity());
}
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline Transform& scale(const MatrixBase<OtherDerived> &other);
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline Transform& prescale(const MatrixBase<OtherDerived> &other);
EIGEN_DEVICE_FUNC inline Transform& scale(const Scalar& s);
EIGEN_DEVICE_FUNC inline Transform& prescale(const Scalar& s);
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline Transform& translate(const MatrixBase<OtherDerived> &other);
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
template<typename RotationType>
EIGEN_DEVICE_FUNC
inline Transform& rotate(const RotationType& rotation);
template<typename RotationType>
EIGEN_DEVICE_FUNC
inline Transform& prerotate(const RotationType& rotation);
EIGEN_DEVICE_FUNC Transform& shear(const Scalar& sx, const Scalar& sy);
EIGEN_DEVICE_FUNC Transform& preshear(const Scalar& sx, const Scalar& sy);
EIGEN_DEVICE_FUNC inline Transform& operator=(const TranslationType& t);
EIGEN_DEVICE_FUNC
inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
EIGEN_DEVICE_FUNC inline Transform operator*(const TranslationType& t) const;
EIGEN_DEVICE_FUNC
inline Transform& operator=(const UniformScaling<Scalar>& t);
EIGEN_DEVICE_FUNC
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
EIGEN_DEVICE_FUNC
inline TransformTimeDiagonalReturnType operator*(const UniformScaling<Scalar>& s) const
{
TransformTimeDiagonalReturnType res = *this;
res.scale(s.factor());
return res;
}
EIGEN_DEVICE_FUNC
inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linearExt() *= s; return *this; }
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform& operator=(const RotationBase<Derived,Dim>& r);
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const;
template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC
void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
template<typename ScalingMatrixType, typename RotationMatrixType>
EIGEN_DEVICE_FUNC
void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
EIGEN_DEVICE_FUNC
Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
EIGEN_DEVICE_FUNC
inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;
/** \returns a const pointer to the column major internal matrix */
EIGEN_DEVICE_FUNC const Scalar* data() const { return m_matrix.data(); }
/** \returns a non-const pointer to the column major internal matrix */
EIGEN_DEVICE_FUNC Scalar* data() { return m_matrix.data(); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const
{ return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
EIGEN_DEVICE_FUNC inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)
{
check_template_params();
m_matrix = other.matrix().template cast<Scalar>();
}
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_matrix.isApprox(other.m_matrix, prec); }
/** Sets the last row to [0 ... 0 1]
*/
EIGEN_DEVICE_FUNC void makeAffine()
{
internal::transform_make_affine<int(Mode)>::run(m_matrix);
}
/** \internal
* \returns the Dim x Dim linear part if the transformation is affine,
* and the HDim x Dim part for projective transformations.
*/
EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()
{ return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
/** \internal
* \returns the Dim x Dim linear part if the transformation is affine,
* and the HDim x Dim part for projective transformations.
*/
EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const
{ return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
/** \internal
* \returns the translation part if the transformation is affine,
* and the last column for projective transformations.
*/
EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()
{ return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
/** \internal
* \returns the translation part if the transformation is affine,
* and the last column for projective transformations.
*/
EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const
{ return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
#ifdef EIGEN_TRANSFORM_PLUGIN
#include EIGEN_TRANSFORM_PLUGIN
#endif
protected:
#ifndef EIGEN_PARSED_BY_DOXYGEN
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void check_template_params()
{
EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
}
#endif
};
/** \ingroup Geometry_Module */
typedef Transform<float,2,Isometry> Isometry2f;
/** \ingroup Geometry_Module */
typedef Transform<float,3,Isometry> Isometry3f;
/** \ingroup Geometry_Module */
typedef Transform<double,2,Isometry> Isometry2d;
/** \ingroup Geometry_Module */
typedef Transform<double,3,Isometry> Isometry3d;
/** \ingroup Geometry_Module */
typedef Transform<float,2,Affine> Affine2f;
/** \ingroup Geometry_Module */
typedef Transform<float,3,Affine> Affine3f;
/** \ingroup Geometry_Module */
typedef Transform<double,2,Affine> Affine2d;
/** \ingroup Geometry_Module */
typedef Transform<double,3,Affine> Affine3d;
/** \ingroup Geometry_Module */
typedef Transform<float,2,AffineCompact> AffineCompact2f;
/** \ingroup Geometry_Module */
typedef Transform<float,3,AffineCompact> AffineCompact3f;
/** \ingroup Geometry_Module */
typedef Transform<double,2,AffineCompact> AffineCompact2d;
/** \ingroup Geometry_Module */
typedef Transform<double,3,AffineCompact> AffineCompact3d;
/** \ingroup Geometry_Module */
typedef Transform<float,2,Projective> Projective2f;
/** \ingroup Geometry_Module */
typedef Transform<float,3,Projective> Projective3f;
/** \ingroup Geometry_Module */
typedef Transform<double,2,Projective> Projective2d;
/** \ingroup Geometry_Module */
typedef Transform<double,3,Projective> Projective3d;
/**************************
*** Optional QT support ***
**************************/
#ifdef EIGEN_QT_SUPPORT
/** Initializes \c *this from a QMatrix assuming the dimension is 2.
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode,int Options>
Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)
{
check_template_params();
*this = other;
}
/** Set \c *this from a QMatrix assuming the dimension is 2.
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode,int Options>
Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
{
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
if (Mode == int(AffineCompact))
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy();
else
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy(),
0, 0, 1;
return *this;
}
/** \returns a QMatrix from \c *this assuming the dimension is 2.
*
 * \warning this conversion might lose data if \c *this is not affine
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode, int Options>
QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const
{
check_template_params();
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
m_matrix.coeff(0,1), m_matrix.coeff(1,1),
m_matrix.coeff(0,2), m_matrix.coeff(1,2));
}
/** Initializes \c *this from a QTransform assuming the dimension is 2.
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode,int Options>
Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)
{
check_template_params();
*this = other;
}
/** Set \c *this from a QTransform assuming the dimension is 2.
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode, int Options>
Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)
{
check_template_params();
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
if (Mode == int(AffineCompact))
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy();
else
m_matrix << other.m11(), other.m21(), other.dx(),
other.m12(), other.m22(), other.dy(),
other.m13(), other.m23(), other.m33();
return *this;
}
/** \returns a QTransform from \c *this assuming the dimension is 2.
*
* This function is available only if the token EIGEN_QT_SUPPORT is defined.
*/
template<typename Scalar, int Dim, int Mode, int Options>
QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const
{
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
if (Mode == int(AffineCompact))
return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
m_matrix.coeff(0,1), m_matrix.coeff(1,1),
m_matrix.coeff(0,2), m_matrix.coeff(1,2));
else
return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
}
#endif
/*********************
*** Procedural API ***
*********************/
/** Applies on the right the non-uniform scale transformation represented
* by the vector \a other to \c *this and returns a reference to \c *this.
* \sa prescale()
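 *
 * A small illustrative sketch (the names are arbitrary; note that this method
 * is disabled at compile time for Isometry transforms):
 * \code
 * Affine2f t = Affine2f::Identity();
 * t.scale(Vector2f(2,3)); // x coordinates are stretched by 2, y by 3
 * \endcode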
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
linearExt().noalias() = (linearExt() * other.asDiagonal());
return *this;
}
/** Applies on the right a uniform scale of a factor \a s to \c *this
* and returns a reference to \c *this.
* \sa prescale(Scalar)
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)
{
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
linearExt() *= s;
return *this;
}
/** Applies on the left the non-uniform scale transformation represented
* by the vector \a other to \c *this and returns a reference to \c *this.
* \sa scale()
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
affine().noalias() = (other.asDiagonal() * affine());
return *this;
}
/** Applies on the left a uniform scale of a factor \a s to \c *this
* and returns a reference to \c *this.
* \sa scale(Scalar)
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)
{
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
m_matrix.template topRows<Dim>() *= s;
return *this;
}
/** Applies on the right the translation matrix represented by the vector \a other
* to \c *this and returns a reference to \c *this.
* \sa pretranslate()
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
translationExt() += linearExt() * other;
return *this;
}
/** Applies on the left the translation matrix represented by the vector \a other
* to \c *this and returns a reference to \c *this.
* \sa translate()
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
if(int(Mode)==int(Projective))
affine() += other * m_matrix.row(Dim);
else
translation() += other;
return *this;
}
/** Applies on the right the rotation represented by the rotation \a rotation
* to \c *this and returns a reference to \c *this.
*
* The template parameter \a RotationType is the type of the rotation which
* must be known by internal::toRotationMatrix<>.
*
 * Natively supported types include:
* - any scalar (2D),
* - a Dim x Dim matrix expression,
* - a Quaternion (3D),
 * - an AngleAxis (3D)
*
* This mechanism is easily extendable to support user types such as Euler angles,
 * or a pair of Quaternions for 4D rotations.
*
* \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
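 *
 * A small illustrative sketch (3D; any of the natively supported rotation
 * types listed above would do):
 * \code
 * Affine3f t = Affine3f::Identity();
 * t.rotate(AngleAxisf(0.5f, Vector3f::UnitZ()))  // rotate by an angle-axis
 *  .rotate(Quaternionf::Identity());             // then by a (here trivial) quaternion
 * \endcode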
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename RotationType>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)
{
linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);
return *this;
}
/** Applies on the left the rotation represented by the rotation \a rotation
* to \c *this and returns a reference to \c *this.
*
* See rotate() for further details.
*
* \sa rotate()
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename RotationType>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
{
m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)
* m_matrix.template block<Dim,HDim>(0,0);
return *this;
}
/** Applies on the right the shear transformation of coefficients \a sx and \a sy
 * to \c *this and returns a reference to \c *this.
* \warning 2D only.
* \sa preshear()
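 *
 * A small illustrative sketch; starting from the identity, the linear part
 * becomes [1 sy ; sx 1]:
 * \code
 * Affine2f t = Affine2f::Identity();
 * t.shear(0.2f, 0.0f); // maps (x,y) to (x, y + 0.2*x)
 * \endcode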
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)
{
EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
VectorType tmp = linear().col(0)*sy + linear().col(1);
linear() << linear().col(0) + linear().col(1)*sx, tmp;
return *this;
}
/** Applies on the left the shear transformation of coefficients \a sx and \a sy
 * to \c *this and returns a reference to \c *this.
* \warning 2D only.
* \sa shear()
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)
{
EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
return *this;
}
/******************************************************
*** Scaling, Translation and Rotation compatibility ***
******************************************************/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)
{
linear().setIdentity();
translation() = t.vector();
makeAffine();
return *this;
}
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const
{
Transform res = *this;
res.translate(t.vector());
return res;
}
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)
{
m_matrix.setZero();
linear().diagonal().fill(s.factor());
makeAffine();
return *this;
}
template<typename Scalar, int Dim, int Mode, int Options>
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)
{
linear() = internal::toRotationMatrix<Scalar,Dim>(r);
translation().setZero();
makeAffine();
return *this;
}
template<typename Scalar, int Dim, int Mode, int Options>
template<typename Derived>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const
{
Transform res = *this;
res.rotate(r.derived());
return res;
}
/************************
*** Special functions ***
************************/
/** \returns the rotation part of the transformation
 *
* \svd_module
*
* \sa computeRotationScaling(), computeScalingRotation(), class SVD
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
Transform<Scalar,Dim,Mode,Options>::rotation() const
{
LinearMatrixType result;
computeRotationScaling(&result, (LinearMatrixType*)0);
return result;
}
/** Decomposes the linear part of the transformation as a product rotation x scaling;
 * the scaling is not necessarily positive.
 *
 * If either pointer is zero, the corresponding computation is skipped.
 *
* \svd_module
*
* \sa computeScalingRotation(), rotation(), class SVD
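 *
 * A small illustrative sketch (the names are arbitrary):
 * \code
 * Affine3d t = Affine3d::Identity();
 * t.rotate(AngleAxisd(0.3, Vector3d::UnitZ())).scale(Vector3d(1,2,3));
 * Matrix3d rot, sc;
 * t.computeRotationScaling(&rot, &sc); // now t.linear() is approximately rot * sc
 * \endcode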
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename RotationMatrixType, typename ScalingMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
{
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
VectorType sv(svd.singularValues());
sv.coeffRef(0) *= x;
if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
if(rotation)
{
LinearMatrixType m(svd.matrixU());
m.col(0) /= x;
rotation->lazyAssign(m * svd.matrixV().adjoint());
}
}
/** Decomposes the linear part of the transformation as a product scaling x rotation;
 * the scaling is not necessarily positive.
 *
 * If either pointer is zero, the corresponding computation is skipped.
 *
* \svd_module
*
* \sa computeRotationScaling(), rotation(), class SVD
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename ScalingMatrixType, typename RotationMatrixType>
EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
{
JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
VectorType sv(svd.singularValues());
sv.coeffRef(0) *= x;
if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
if(rotation)
{
LinearMatrixType m(svd.matrixU());
m.col(0) /= x;
rotation->lazyAssign(m * svd.matrixV().adjoint());
}
}
/** Convenient method to set \c *this from a position, orientation and scale
* of a 3D object.
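 *
 * A small illustrative sketch; the resulting transform maps a point x to
 * R*S*x + position, where R is the orientation as a matrix and S the diagonal
 * scaling:
 * \code
 * Affine3f t;
 * t.fromPositionOrientationScale(Vector3f(1,2,3),
 *                                Quaternionf::Identity(),
 *                                Vector3f(2,2,2));
 * \endcode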
*/
template<typename Scalar, int Dim, int Mode, int Options>
template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&
Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
{
linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);
linear() *= scale.asDiagonal();
translation() = position;
makeAffine();
return *this;
}
namespace internal {
template<int Mode>
struct transform_make_affine
{
template<typename MatrixType>
EIGEN_DEVICE_FUNC static void run(MatrixType &mat)
{
static const int Dim = MatrixType::ColsAtCompileTime-1;
mat.template block<1,Dim>(Dim,0).setZero();
mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);
}
};
template<>
struct transform_make_affine<AffineCompact>
{
template<typename MatrixType> EIGEN_DEVICE_FUNC static void run(MatrixType &) { }
};
// selector needed to avoid taking the inverse of a 3x4 matrix
template<typename TransformType, int Mode=TransformType::Mode>
struct projective_transform_inverse
{
EIGEN_DEVICE_FUNC static inline void run(const TransformType&, TransformType&)
{}
};
template<typename TransformType>
struct projective_transform_inverse<TransformType, Projective>
{
EIGEN_DEVICE_FUNC static inline void run(const TransformType& m, TransformType& res)
{
res.matrix() = m.matrix().inverse();
}
};
} // end namespace internal
/**
*
* \returns the inverse transformation according to some given knowledge
* on \c *this.
*
 * \param hint allows optimizing the inversion process when the transformation
 * is known not to be a general transformation (optional). The possible values are:
 * - #Projective if the transformation is not necessarily affine, i.e., if the
 * last row is not guaranteed to be [0 ... 0 1]
 * - #Affine if the last row can be assumed to be [0 ... 0 1]
 * - #Isometry if the transformation is only a concatenation of translations
 * and rotations.
 * The default is the template class parameter \c Mode.
 *
 * \warning unless \a hint is always set to #Isometry, this function requires
 * the generic inverse method of MatrixBase defined in the LU module. If you
 * forget to include this module, then you will get hard-to-debug linking errors.
*
* \sa MatrixBase::inverse()
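 *
 * A small illustrative sketch: if an affine transform is known to be a rigid
 * motion, passing #Isometry avoids any actual matrix inversion:
 * \code
 * Affine3f t = Translation3f(1,2,3) * AngleAxisf(0.5f, Vector3f::UnitZ());
 * Affine3f ti = t.inverse(Isometry); // only a transpose is performed
 * \endcode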
*/
template<typename Scalar, int Dim, int Mode, int Options>
EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>
Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const
{
Transform res;
if (hint == Projective)
{
internal::projective_transform_inverse<Transform>::run(*this, res);
}
else
{
if (hint == Isometry)
{
res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();
}
else if(hint&Affine)
{
res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();
}
else
{
eigen_assert(false && "Invalid transform traits in Transform::inverse");
}
// translation and remaining parts
res.matrix().template topRightCorner<Dim,1>()
= - res.matrix().template topLeftCorner<Dim,Dim>() * translation();
res.makeAffine(); // we do need this, because in the beginning res is uninitialized
}
return res;
}
namespace internal {
/*****************************************************
*** Specializations of take affine part ***
*****************************************************/
template<typename TransformType> struct transform_take_affine_part {
typedef typename TransformType::MatrixType MatrixType;
typedef typename TransformType::AffinePart AffinePart;
typedef typename TransformType::ConstAffinePart ConstAffinePart;
static inline AffinePart run(MatrixType& m)
{ return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
static inline ConstAffinePart run(const MatrixType& m)
{ return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
};
template<typename Scalar, int Dim, int Options>
struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {
typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;
static inline MatrixType& run(MatrixType& m) { return m; }
static inline const MatrixType& run(const MatrixType& m) { return m; }
};
/*****************************************************
*** Specializations of construct from matrix ***
*****************************************************/
template<typename Other, int Mode, int Options, int Dim, int HDim>
struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>
{
static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
{
transform->linear() = other;
transform->translation().setZero();
transform->makeAffine();
}
};
template<typename Other, int Mode, int Options, int Dim, int HDim>
struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>
{
static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
{
transform->affine() = other;
transform->makeAffine();
}
};
template<typename Other, int Mode, int Options, int Dim, int HDim>
struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>
{
static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
{ transform->matrix() = other; }
};
template<typename Other, int Options, int Dim, int HDim>
struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>
{
static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)
{ transform->matrix() = other.template block<Dim,HDim>(0,0); }
};
/**********************************************************
*** Specializations of operator* with rhs EigenBase ***
**********************************************************/
template<int LhsMode,int RhsMode>
struct transform_product_result
{
enum
{
Mode =
(LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective :
(LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine :
(LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :
(LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? Isometry : Projective
};
};
template< typename TransformType, typename MatrixType, int RhsCols>
struct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols>
{
typedef typename MatrixType::PlainObject ResultType;
static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
return T.matrix() * other;
}
};
template< typename TransformType, typename MatrixType, int RhsCols>
struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>
{
enum {
Dim = TransformType::Dim,
HDim = TransformType::HDim,
OtherRows = MatrixType::RowsAtCompileTime,
OtherCols = MatrixType::ColsAtCompileTime
};
typedef typename MatrixType::PlainObject ResultType;
static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs;
ResultType res(other.rows(),other.cols());
TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
res.row(OtherRows-1) = other.row(OtherRows-1);
return res;
}
};
template< typename TransformType, typename MatrixType, int RhsCols>
struct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols>
{
enum {
Dim = TransformType::Dim,
HDim = TransformType::HDim,
OtherRows = MatrixType::RowsAtCompileTime,
OtherCols = MatrixType::ColsAtCompileTime
};
typedef typename MatrixType::PlainObject ResultType;
static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs;
ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, OtherCols>(T.translation(),1,other.cols()));
TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other;
return res;
}
};
template< typename TransformType, typename MatrixType >
struct transform_right_product_impl< TransformType, MatrixType, 2, 1> // rhs is a vector of size Dim
{
typedef typename TransformType::MatrixType TransformMatrix;
enum {
Dim = TransformType::Dim,
HDim = TransformType::HDim,
OtherRows = MatrixType::RowsAtCompileTime,
WorkingRows = EIGEN_PLAIN_ENUM_MIN(TransformMatrix::RowsAtCompileTime,HDim)
};
typedef typename MatrixType::PlainObject ResultType;
static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
{
EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
Matrix<typename ResultType::Scalar, Dim+1, 1> rhs;
rhs.template head<Dim>() = other; rhs[Dim] = typename ResultType::Scalar(1);
Matrix<typename ResultType::Scalar, WorkingRows, 1> res(T.matrix() * rhs);
return res.template head<Dim>();
}
};
/**********************************************************
*** Specializations of operator* with lhs EigenBase ***
**********************************************************/
// generic HDim x HDim matrix * T => Projective
template<typename Other,int Mode, int Options, int Dim, int HDim>
struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>
{
typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
typedef typename TransformType::MatrixType MatrixType;
typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
static ResultType run(const Other& other,const TransformType& tr)
{ return ResultType(other * tr.matrix()); }
};
// generic HDim x HDim matrix * AffineCompact => Projective
template<typename Other, int Options, int Dim, int HDim>
struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>
{
typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
typedef typename TransformType::MatrixType MatrixType;
typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
static ResultType run(const Other& other,const TransformType& tr)
{
ResultType res;
res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();
res.matrix().col(Dim) += other.col(Dim);
return res;
}
};
// affine matrix * T
template<typename Other,int Mode, int Options, int Dim, int HDim>
struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>
{
typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
typedef typename TransformType::MatrixType MatrixType;
typedef TransformType ResultType;
static ResultType run(const Other& other,const TransformType& tr)
{
ResultType res;
res.affine().noalias() = other * tr.matrix();
res.matrix().row(Dim) = tr.matrix().row(Dim);
return res;
}
};
// affine matrix * AffineCompact
template<typename Other, int Options, int Dim, int HDim>
struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>
{
typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
typedef typename TransformType::MatrixType MatrixType;
typedef TransformType ResultType;
static ResultType run(const Other& other,const TransformType& tr)
{
ResultType res;
res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();
res.translation() += other.col(Dim);
return res;
}
};
// linear matrix * T
template<typename Other,int Mode, int Options, int Dim, int HDim>
struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>
{
typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
typedef typename TransformType::MatrixType MatrixType;
typedef TransformType ResultType;
static ResultType run(const Other& other, const TransformType& tr)
{
TransformType res;
if(Mode!=int(AffineCompact))
res.matrix().row(Dim) = tr.matrix().row(Dim);
res.matrix().template topRows<Dim>().noalias()
= other * tr.matrix().template topRows<Dim>();
return res;
}
};
/**********************************************************
*** Specializations of operator* with another Transform ***
**********************************************************/
template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >
{
enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };
typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;
static ResultType run(const Lhs& lhs, const Rhs& rhs)
{
ResultType res;
res.linear() = lhs.linear() * rhs.linear();
res.translation() = lhs.linear() * rhs.translation() + lhs.translation();
res.makeAffine();
return res;
}
};
template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >
{
typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
typedef Transform<Scalar,Dim,Projective> ResultType;
static ResultType run(const Lhs& lhs, const Rhs& rhs)
{
return ResultType( lhs.matrix() * rhs.matrix() );
}
};
template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
struct transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true >
{
typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs;
typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs;
typedef Transform<Scalar,Dim,Projective> ResultType;
static ResultType run(const Lhs& lhs, const Rhs& rhs)
{
ResultType res;
res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix();
res.matrix().row(Dim) = rhs.matrix().row(Dim);
return res;
}
};
template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
struct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true >
{
typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs;
typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs;
typedef Transform<Scalar,Dim,Projective> ResultType;
static ResultType run(const Lhs& lhs, const Rhs& rhs)
{
ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix());
res.matrix().col(Dim) += lhs.matrix().col(Dim);
return res;
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRANSFORM_H
| 60,514 | 38.219054 | 148 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Translation.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRANSLATION_H
#define EIGEN_TRANSLATION_H
namespace Eigen {
/** \geometry_module \ingroup Geometry_Module
*
* \class Translation
*
* \brief Represents a translation transformation
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients.
* \tparam _Dim the dimension of the space, can be a compile time value or Dynamic
*
 * \note This class is not intended to store a translation transformation,
 * but rather to ease the construction and update of Transform objects.
*
* \sa class Scaling, class Transform
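 *
 * A typical illustrative use (the resulting transform mode depends on the factors):
 * \code
 * Isometry3f t = Translation3f(1,2,3) * AngleAxisf(0.25f, Vector3f::UnitX());
 * Vector3f p  = Translation3f(1,2,3) * Vector3f::UnitX(); // p = (2,2,3)
 * \endcode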
*/
template<typename _Scalar, int _Dim>
class Translation
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
/** dimension of the space */
enum { Dim = _Dim };
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
/** corresponding vector type */
typedef Matrix<Scalar,Dim,1> VectorType;
/** corresponding linear transformation matrix type */
typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
/** corresponding affine transformation type */
typedef Transform<Scalar,Dim,Affine> AffineTransformType;
/** corresponding isometric transformation type */
typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;
protected:
VectorType m_coeffs;
public:
/** Default constructor without initialization. */
EIGEN_DEVICE_FUNC Translation() {}
/** Constructs and initializes a 2D translation transformation from its coefficients \a sx and \a sy */
EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy)
{
eigen_assert(Dim==2);
m_coeffs.x() = sx;
m_coeffs.y() = sy;
}
/** Constructs and initializes a 3D translation transformation from its coefficients \a sx, \a sy and \a sz */
EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
{
eigen_assert(Dim==3);
m_coeffs.x() = sx;
m_coeffs.y() = sy;
m_coeffs.z() = sz;
}
/** Constructs and initializes the translation transformation from a vector of translation coefficients */
EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
/** \brief Returns the x-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); }
/** \brief Returns the y-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); }
/** \brief Returns the z-translation by value. **/
EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); }
/** \brief Returns the x-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); }
/** \brief Returns the y-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); }
/** \brief Returns the z-translation as a reference. **/
EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); }
EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; }
EIGEN_DEVICE_FUNC VectorType& vector() { return m_coeffs; }
EIGEN_DEVICE_FUNC const VectorType& translation() const { return m_coeffs; }
EIGEN_DEVICE_FUNC VectorType& translation() { return m_coeffs; }
/** Concatenates two translations */
EIGEN_DEVICE_FUNC inline Translation operator* (const Translation& other) const
{ return Translation(m_coeffs + other.m_coeffs); }
/** Concatenates a translation and a uniform scaling */
EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;
/** Concatenates a translation and a linear transformation */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;
/** Concatenates a translation and a rotation */
template<typename Derived>
EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
{ return *this * IsometryTransformType(r); }
/** \returns the concatenation of a linear transformation \a linear with the translation \a t */
// it's a nightmare to define a templated friend function outside of its declaration
template<typename OtherDerived> friend
EIGEN_DEVICE_FUNC inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
{
AffineTransformType res;
res.matrix().setZero();
res.linear() = linear.derived();
res.translation() = linear.derived() * t.m_coeffs;
res.matrix().row(Dim).setZero();
res(Dim,Dim) = Scalar(1);
return res;
}
/** Concatenates a translation and a transformation */
template<int Mode, int Options>
EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const
{
Transform<Scalar,Dim,Mode> res = t;
res.pretranslate(m_coeffs);
return res;
}
/** Applies translation to vector */
template<typename Derived>
inline typename internal::enable_if<Derived::IsVectorAtCompileTime,VectorType>::type
operator* (const MatrixBase<Derived>& vec) const
{ return m_coeffs + vec.derived(); }
/** \returns the inverse translation (opposite) */
Translation inverse() const { return Translation(-m_coeffs); }
Translation& operator=(const Translation& other)
{
m_coeffs = other.m_coeffs;
return *this;
}
static const Translation Identity() { return Translation(VectorType::Zero()); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
* Note that if \a NewScalarType is equal to the current scalar type of \c *this
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
{ return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
EIGEN_DEVICE_FUNC inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
{ m_coeffs = other.vector().template cast<Scalar>(); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
EIGEN_DEVICE_FUNC bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }
};
/** \addtogroup Geometry_Module */
//@{
typedef Translation<float, 2> Translation2f;
typedef Translation<double,2> Translation2d;
typedef Translation<float, 3> Translation3f;
typedef Translation<double,3> Translation3d;
//@}
template<typename Scalar, int Dim>
EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType
Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const
{
AffineTransformType res;
res.matrix().setZero();
res.linear().diagonal().fill(other.factor());
res.translation() = m_coeffs;
res(Dim,Dim) = Scalar(1);
return res;
}
template<typename Scalar, int Dim>
template<typename OtherDerived>
EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType
Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const
{
AffineTransformType res;
res.matrix().setZero();
res.linear() = linear.derived();
res.translation() = m_coeffs;
res.matrix().row(Dim).setZero();
res(Dim,Dim) = Scalar(1);
return res;
}
} // end namespace Eigen
#endif // EIGEN_TRANSLATION_H
| 7,773 | 36.196172 | 150 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/Umeyama.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_UMEYAMA_H
#define EIGEN_UMEYAMA_H
// This file requires the user to include
// * Eigen/Core
// * Eigen/LU
// * Eigen/SVD
// * Eigen/Array
namespace Eigen {
#ifndef EIGEN_PARSED_BY_DOXYGEN
// These helpers are required since they allow the use of mixed types as parameters
// for umeyama(). The problem with mixed parameters is that the return type
// cannot trivially be deduced when float and double types are mixed.
namespace internal {
// Compile time return type deduction for different MatrixBase types.
// Here "different" means different alignment and parameters but the same underlying
// real scalar type.
template<typename MatrixType, typename OtherMatrixType>
struct umeyama_transform_matrix_type
{
enum {
MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
// When possible we want to choose some small fixed size value since the result
// is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
};
typedef Matrix<typename traits<MatrixType>::Scalar,
HomogeneousDimension,
HomogeneousDimension,
AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
HomogeneousDimension,
HomogeneousDimension
> type;
};
}
#endif
/**
* \geometry_module \ingroup Geometry_Module
*
* \brief Returns the transformation between two point sets.
*
* The algorithm is based on:
* "Least-squares estimation of transformation parameters between two point patterns",
* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
*
* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
* \f{align*}
* \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
* \f}
* is minimized.
*
 * The algorithm is based on the analysis of the covariance matrix
 * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
 * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$, where
 * \f$d\f$ corresponds to the dimension (which is typically small).
 * The analysis involves an SVD of complexity \f$O(d^3)\f$, though the actual
 * computational effort lies in the computation of the covariance matrix,
 * which has an asymptotic lower bound of \f$O(dm)\f$ when the input point
 * sets have dimension \f$d \times m\f$.
 *
 * Currently the method works only for floating point matrices.
*
* \todo Should the return type of umeyama() become a Transform?
*
* \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
* \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
* \return The homogeneous transformation
* \f{align*}
* T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
* \f}
 * minimizing the residual above. This transformation is always returned as an
* Eigen::Matrix.
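 *
 * A minimal usage sketch (illustrative only; the point sets are stored
 * column-wise, and here \c dst is generated from \c src by a known rigid motion):
 * \code
 * Matrix3Xd src = Matrix3Xd::Random(3, 20);
 * Isometry3d T0 = Translation3d(1,0,2) * AngleAxisd(0.3, Vector3d::UnitY());
 * Matrix3Xd dst = T0 * src;
 * Matrix4d T = umeyama(src, dst); // T is (approximately) T0.matrix()
 * \endcode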
*/
template <typename Derived, typename OtherDerived>
typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
{
typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
typedef Matrix<Scalar, Dimension, 1> VectorType;
typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
const Index m = src.rows(); // dimension
const Index n = src.cols(); // number of measurements
// required for demeaning ...
const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);
// computation of mean
const VectorType src_mean = src.rowwise().sum() * one_over_n;
const VectorType dst_mean = dst.rowwise().sum() * one_over_n;
// demeaning of src and dst points
const RowMajorMatrixType src_demean = src.colwise() - src_mean;
const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;
// Eq. (36)-(37)
const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;
// Eq. (38)
const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
// Initialize the resulting transformation with an identity matrix...
TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
// Eq. (39)
VectorType S = VectorType::Ones(m);
if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 )
S(m-1) = -1;
// Eq. (40) and (43)
Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
if (with_scaling)
{
// Eq. (42)
const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);
// Eq. (41)
Rt.col(m).head(m) = dst_mean;
Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
Rt.block(0,0,m,m) *= c;
}
else
{
Rt.col(m).head(m) = dst_mean;
Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
}
return Rt;
}
} // end namespace Eigen
#endif // EIGEN_UMEYAMA_H
| 6,191 | 36.077844 | 124 |
h
|
abess
|
abess-master/python/include/Eigen/src/Geometry/arch/Geometry_SSE.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Rohit Garg <[email protected]>
// Copyright (C) 2009-2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GEOMETRY_SSE_H
#define EIGEN_GEOMETRY_SSE_H
namespace Eigen {
namespace internal {
template<class Derived, class OtherDerived>
struct quat_product<Architecture::SSE, Derived, OtherDerived, float>
{
enum {
AAlignment = traits<Derived>::Alignment,
BAlignment = traits<OtherDerived>::Alignment,
ResAlignment = traits<Quaternion<float> >::Alignment
};
static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
{
Quaternion<float> res;
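// xor-ing with the following mask (0,0,0,-0) flips the sign bit of the last (w) lane only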
const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);
__m128 a = _a.coeffs().template packet<AAlignment>(0);
__m128 b = _b.coeffs().template packet<BAlignment>(0);
__m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));
__m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));
pstoret<float,Packet4f,ResAlignment>(
&res.x(),
_mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),
_mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),
vec4f_swizzle1(b,1,2,0,0))),
_mm_xor_ps(mask,_mm_add_ps(s1,s2))));
return res;
}
};
template<class Derived>
struct quat_conj<Architecture::SSE, Derived, float>
{
enum {
ResAlignment = traits<Quaternion<float> >::Alignment
};
static inline Quaternion<float> run(const QuaternionBase<Derived>& q)
{
Quaternion<float> res;
const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);
pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
return res;
}
};
template<typename VectorLhs,typename VectorRhs>
struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
{
enum {
ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment
};
static inline typename plain_matrix_type<VectorLhs>::type
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
__m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0);
__m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0);
__m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
__m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
typename plain_matrix_type<VectorLhs>::type res;
pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2));
return res;
}
};
template<class Derived, class OtherDerived>
struct quat_product<Architecture::SSE, Derived, OtherDerived, double>
{
enum {
BAlignment = traits<OtherDerived>::Alignment,
ResAlignment = traits<Quaternion<double> >::Alignment
};
static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
{
const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
Quaternion<double> res;
const double* a = _a.coeffs().data();
Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0);
Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2);
Packet2d a_xx = pset1<Packet2d>(a[0]);
Packet2d a_yy = pset1<Packet2d>(a[1]);
Packet2d a_zz = pset1<Packet2d>(a[2]);
Packet2d a_ww = pset1<Packet2d>(a[3]);
// two temporaries:
Packet2d t1, t2;
/*
* t1 = ww*xy + yy*zw
* t2 = zz*xy - xx*zw
* res.xy = t1 +/- swap(t2)
*/
t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
#ifdef EIGEN_VECTORIZE_SSE3
EIGEN_UNUSED_VARIABLE(mask)
pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
#else
pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2))));
#endif
/*
* t1 = ww*zw - yy*xy
* t2 = zz*zw + xx*xy
* res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)
*/
t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));
t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
#ifdef EIGEN_VECTORIZE_SSE3
EIGEN_UNUSED_VARIABLE(mask)
pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
#else
pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2))));
#endif
return res;
}
};
template<class Derived>
struct quat_conj<Architecture::SSE, Derived, double>
{
enum {
ResAlignment = traits<Quaternion<double> >::Alignment
};
static inline Quaternion<double> run(const QuaternionBase<Derived>& q)
{
Quaternion<double> res;
const __m128d mask0 = _mm_setr_pd(-0.,-0.);
const __m128d mask2 = _mm_setr_pd(-0.,0.);
pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0)));
pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2)));
return res;
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GEOMETRY_SSE_H
| 5,387 | 32.259259 | 130 |
h
|
abess
|
abess-master/python/include/Eigen/src/Householder/BlockHouseholder.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Vincent Lejeune
// Copyright (C) 2010 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BLOCK_HOUSEHOLDER_H
#define EIGEN_BLOCK_HOUSEHOLDER_H
// This file contains some helper function to deal with block householder reflectors
namespace Eigen {
namespace internal {
/** \internal */
// template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
// void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
// {
// typedef typename VectorsType::Scalar Scalar;
// const Index nbVecs = vectors.cols();
// eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
//
// for(Index i = 0; i < nbVecs; i++)
// {
// Index rs = vectors.rows() - i;
// // Warning, note that hCoeffs may alias with vectors.
// // It is then necessary to copy it before modifying vectors(i,i).
// typename CoeffsType::Scalar h = hCoeffs(i);
// // This hack permits passing through nested Block<> and Transpose<> expressions.
// Scalar *Vii_ptr = const_cast<Scalar*>(vectors.data() + vectors.outerStride()*i + vectors.innerStride()*i);
// Scalar Vii = *Vii_ptr;
// *Vii_ptr = Scalar(1);
// triFactor.col(i).head(i).noalias() = -h * vectors.block(i, 0, rs, i).adjoint()
// * vectors.col(i).tail(rs);
// *Vii_ptr = Vii;
// // FIXME add .noalias() once the triangular product can work inplace
// triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>()
// * triFactor.col(i).head(i);
// triFactor(i,i) = hCoeffs(i);
// }
// }
/** \internal */
// This variant avoids modifying the vectors
template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
{
const Index nbVecs = vectors.cols();
eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
for(Index i = nbVecs-1; i >=0 ; --i)
{
Index rs = vectors.rows() - i - 1;
Index rt = nbVecs-i-1;
if(rt>0)
{
triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()
* vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();
// FIXME add .noalias() once the triangular product can work inplace
triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
}
triFactor(i,i) = hCoeffs(i);
}
}
/** \internal
* if forward then perform mat = H0 * H1 * H2 * mat
* otherwise perform mat = H2 * H1 * H0 * mat
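 *
 * Internally the sequence is applied through its compact WY representation,
 * i.e., as mat = (I - V T V^*) mat, where V holds the essential parts of the
 * reflectors and T is the triangular factor built by
 * make_block_householder_triangular_factor() (its adjoint is used for the
 * reverse order).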
*/
template<typename MatrixType,typename VectorsType,typename CoeffsType>
void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward)
{
enum { TFactorSize = MatrixType::ColsAtCompileTime };
Index nbVecs = vectors.cols();
Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, RowMajor> T(nbVecs,nbVecs);
if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs);
else make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate());
const TriangularView<const VectorsType, UnitLower> V(vectors);
// A -= V T V^* A
Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,
(VectorsType::MaxColsAtCompileTime==1 && MatrixType::MaxColsAtCompileTime!=1)?RowMajor:ColMajor,
VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
// FIXME add .noalias() once the triangular product can work inplace
if(forward) tmp = T.template triangularView<Upper>() * tmp;
else tmp = T.template triangularView<Upper>().adjoint() * tmp;
mat.noalias() -= V * tmp;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BLOCK_HOUSEHOLDER_H
| 4,481 | 42.096154 | 136 |
h
|
abess
|
abess-master/python/include/Eigen/src/Householder/Householder.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HOUSEHOLDER_H
#define EIGEN_HOUSEHOLDER_H
namespace Eigen {
namespace internal {
template<int n> struct decrement_size
{
enum {
ret = n==Dynamic ? n : n-1
};
};
}
/** Computes the elementary reflector H such that:
* \f$ H *this = [ beta 0 ... 0]^T \f$
* where the transformation H is:
* \f$ H = I - tau v v^*\f$
* and the vector v is:
* \f$ v^T = [1 essential^T] \f$
*
* The essential part of the vector \c v is stored in *this.
*
* On output:
* \param tau the scaling factor of the Householder transformation
* \param beta the result of H * \c *this
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(),
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
{
VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
makeHouseholder(essentialPart, tau, beta);
}
/** Computes the elementary reflector H such that:
* \f$ H *this = [ beta 0 ... 0]^T \f$
* where the transformation H is:
* \f$ H = I - tau v v^*\f$
* and the vector v is:
* \f$ v^T = [1 essential^T] \f$
*
* On output:
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
* \param beta the result of H * \c *this
*
* \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(),
* MatrixBase::applyHouseholderOnTheRight()
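 *
 * A minimal usage sketch (illustrative; the names are arbitrary):
 * \code
 * VectorXd v = VectorXd::Random(5);
 * VectorXd essential(v.size()-1);
 * double tau, beta;
 * v.makeHouseholder(essential, tau, beta);
 * // with w = [1, essential^T]^T one has (I - tau*w*w^*) * v == [beta, 0, ..., 0]^T
 * \endcode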
*/
template<typename Derived>
template<typename EssentialPart>
void MatrixBase<Derived>::makeHouseholder(
EssentialPart& essential,
Scalar& tau,
RealScalar& beta) const
{
using std::sqrt;
using numext::conj;
EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
Scalar c0 = coeff(0);
const RealScalar tol = (std::numeric_limits<RealScalar>::min)();
if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol)
{
tau = RealScalar(0);
beta = numext::real(c0);
essential.setZero();
}
else
{
beta = sqrt(numext::abs2(c0) + tailSqNorm);
if (numext::real(c0)>=RealScalar(0))
beta = -beta;
essential = tail / (c0 - beta);
tau = conj((beta - c0) / beta);
}
}
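/* A minimal sketch of the documented post-condition (not Eigen-provided code;
   assumes real scalars): build v = [1; essential] and check H * x = [beta; 0; ...].

     VectorXd x = VectorXd::Random(5);
     VectorXd essential(4);
     double tau, beta;
     x.makeHouseholder(essential, tau, beta);
     VectorXd v(5);
     v << 1.0, essential;
     VectorXd Hx = x - tau * v * v.dot(x); // H = I - tau v v^T for real scalars
     // Hx.tail(4) is numerically zero and Hx(0) equals beta.
*/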
/** Apply the elementary reflector H given by
* \f$ H = I - tau v v^*\f$
* with
* \f$ v^T = [1 essential^T] \f$
* from the left to a vector or matrix.
*
* On input:
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
 * \param workspace a pointer to working space with at least
 * this->cols() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheRight()
*/
template<typename Derived>
template<typename EssentialPart>
void MatrixBase<Derived>::applyHouseholderOnTheLeft(
const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace)
{
if(rows() == 1)
{
*this *= Scalar(1)-tau;
}
else if(tau!=Scalar(0))
{
Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols());
Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
tmp.noalias() = essential.adjoint() * bottom;
tmp += this->row(0);
this->row(0) -= tau * tmp;
bottom.noalias() -= tau * essential * tmp;
}
}
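/* Illustrative sketch (an assumption-laden example, not Eigen-provided code):
   zero the subdiagonal of the first column of A, i.e. the first step of an
   unblocked Householder QR. A workspace of cols() entries suffices for the
   implementation above.

     MatrixXd A = MatrixXd::Random(5,5);
     VectorXd essential(4);
     double tau, beta;
     A.col(0).makeHouseholder(essential, tau, beta);
     VectorXd workspace(A.cols());
     A.applyHouseholderOnTheLeft(essential, tau, workspace.data());
     // A(0,0) now equals beta and A.col(0).tail(4) is numerically zero.
*/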
/** Apply the elementary reflector H given by
* \f$ H = I - tau v v^*\f$
* with
* \f$ v^T = [1 essential^T] \f$
* from the right to a vector or matrix.
*
* On input:
* \param essential the essential part of the vector \c v
* \param tau the scaling factor of the Householder transformation
 * \param workspace a pointer to working space with at least
 * this->rows() entries
*
* \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(),
* MatrixBase::applyHouseholderOnTheLeft()
*/
template<typename Derived>
template<typename EssentialPart>
void MatrixBase<Derived>::applyHouseholderOnTheRight(
const EssentialPart& essential,
const Scalar& tau,
Scalar* workspace)
{
if(cols() == 1)
{
*this *= Scalar(1)-tau;
}
else if(tau!=Scalar(0))
{
Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());
Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
tmp.noalias() = right * essential.conjugate();
tmp += this->col(0);
this->col(0) -= tau * tmp;
right.noalias() -= tau * tmp * essential.transpose();
}
}
} // end namespace Eigen
#endif // EIGEN_HOUSEHOLDER_H
| 5,345 | 29.901734 | 123 |
h
|
abess
|
abess-master/python/include/Eigen/src/Householder/HouseholderSequence.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H
#define EIGEN_HOUSEHOLDER_SEQUENCE_H
namespace Eigen {
/** \ingroup Householder_Module
* \householder_module
* \class HouseholderSequence
* \brief Sequence of Householder reflections acting on subspaces with decreasing size
* \tparam VectorsType type of matrix containing the Householder vectors
* \tparam CoeffsType type of vector containing the Householder coefficients
* \tparam Side either OnTheLeft (the default) or OnTheRight
*
* This class represents a product sequence of Householder reflections where the first Householder reflection
* acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by
* the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace
* spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but
 * one dimension invariant and acts only on the last dimension.
* are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods
* HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(),
* and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence.
*
* More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the
* form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i
* v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$
* v_i \f$ is a vector of the form
* \f[
* v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
* \f]
* The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector.
*
* Typical usages are listed below, where H is a HouseholderSequence:
* \code
* A.applyOnTheRight(H); // A = A * H
* A.applyOnTheLeft(H); // A = H * A
* A.applyOnTheRight(H.adjoint()); // A = A * H^*
* A.applyOnTheLeft(H.adjoint()); // A = H^* * A
* MatrixXd Q = H; // conversion to a dense matrix
* \endcode
* In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators.
*
* See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example.
*
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
namespace internal {
template<typename VectorsType, typename CoeffsType, int Side>
struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename VectorsType::Scalar Scalar;
typedef typename VectorsType::StorageIndex StorageIndex;
typedef typename VectorsType::StorageKind StorageKind;
enum {
RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime
: traits<VectorsType>::ColsAtCompileTime,
ColsAtCompileTime = RowsAtCompileTime,
MaxRowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::MaxRowsAtCompileTime
: traits<VectorsType>::MaxColsAtCompileTime,
MaxColsAtCompileTime = MaxRowsAtCompileTime,
Flags = 0
};
};
struct HouseholderSequenceShape {};
template<typename VectorsType, typename CoeffsType, int Side>
struct evaluator_traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
: public evaluator_traits_base<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef HouseholderSequenceShape Shape;
};
template<typename VectorsType, typename CoeffsType, int Side>
struct hseq_side_dependent_impl
{
typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
}
};
template<typename VectorsType, typename CoeffsType>
struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
{
typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
}
};
template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type
{
typedef typename ScalarBinaryOpTraits<OtherScalarType, typename MatrixType::Scalar>::ReturnType
ResultScalar;
typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,
0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type;
};
} // end namespace internal
template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
: public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;
public:
enum {
RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
MaxRowsAtCompileTime = internal::traits<HouseholderSequence>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime
};
typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
typedef HouseholderSequence<
typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
VectorsType>::type,
typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
CoeffsType>::type,
Side
> ConjugateReturnType;
/** \brief Constructor.
* \param[in] v %Matrix containing the essential parts of the Householder vectors
* \param[in] h Vector containing the Householder coefficients
*
* Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The
* i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th
* Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the
* i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many
* Householder reflections as there are columns.
*
* \note The %HouseholderSequence object stores \p v and \p h by reference.
*
* Example: \include HouseholderSequence_HouseholderSequence.cpp
* Output: \verbinclude HouseholderSequence_HouseholderSequence.out
*
* \sa setLength(), setShift()
*/
HouseholderSequence(const VectorsType& v, const CoeffsType& h)
: m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),
m_shift(0)
{
}
/** \brief Copy constructor. */
HouseholderSequence(const HouseholderSequence& other)
: m_vectors(other.m_vectors),
m_coeffs(other.m_coeffs),
m_trans(other.m_trans),
m_length(other.m_length),
m_shift(other.m_shift)
{
}
/** \brief Number of rows of transformation viewed as a matrix.
* \returns Number of rows
* \details This equals the dimension of the space that the transformation acts on.
*/
Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
/** \brief Number of columns of transformation viewed as a matrix.
* \returns Number of columns
* \details This equals the dimension of the space that the transformation acts on.
*/
Index cols() const { return rows(); }
/** \brief Essential part of a Householder vector.
* \param[in] k Index of Householder reflection
* \returns Vector containing non-trivial entries of k-th Householder vector
*
* This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of
* length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector
* \f[
* v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
* \f]
* The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v
* passed to the constructor.
*
* \sa setShift(), shift()
*/
const EssentialVectorType essentialVector(Index k) const
{
eigen_assert(k >= 0 && k < m_length);
return internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);
}
/** \brief %Transpose of the Householder sequence. */
HouseholderSequence transpose() const
{
return HouseholderSequence(*this).setTrans(!m_trans);
}
/** \brief Complex conjugate of the Householder sequence. */
ConjugateReturnType conjugate() const
{
return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
.setTrans(m_trans)
.setLength(m_length)
.setShift(m_shift);
}
/** \brief Adjoint (conjugate transpose) of the Householder sequence. */
ConjugateReturnType adjoint() const
{
return conjugate().setTrans(!m_trans);
}
/** \brief Inverse of the Householder sequence (equals the adjoint). */
ConjugateReturnType inverse() const { return adjoint(); }
/** \internal */
template<typename DestType> inline void evalTo(DestType& dst) const
{
Matrix<Scalar, DestType::RowsAtCompileTime, 1,
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows());
evalTo(dst, workspace);
}
/** \internal */
template<typename Dest, typename Workspace>
void evalTo(Dest& dst, Workspace& workspace) const
{
workspace.resize(rows());
Index vecs = m_length;
if(internal::is_same_dense(dst,m_vectors))
{
// in-place
dst.diagonal().setOnes();
dst.template triangularView<StrictlyUpper>().setZero();
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
if(m_trans)
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
else
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data());
// clear the off diagonal vector
dst.col(k).tail(rows()-k-1).setZero();
}
// clear the remaining columns if needed
for(Index k = 0; k<cols()-vecs ; ++k)
dst.col(k).tail(rows()-k-1).setZero();
}
else
{
dst.setIdentity(rows(), rows());
for(Index k = vecs-1; k >= 0; --k)
{
Index cornerSize = rows() - k - m_shift;
if(m_trans)
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
else
dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
}
}
}
/** \internal */
template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
{
Matrix<Scalar,1,Dest::RowsAtCompileTime,RowMajor,1,Dest::MaxRowsAtCompileTime> workspace(dst.rows());
applyThisOnTheRight(dst, workspace);
}
/** \internal */
template<typename Dest, typename Workspace>
inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const
{
workspace.resize(dst.rows());
for(Index k = 0; k < m_length; ++k)
{
Index actual_k = m_trans ? m_length-k-1 : k;
dst.rightCols(rows()-m_shift-actual_k)
.applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
/** \internal */
template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
{
Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace;
applyThisOnTheLeft(dst, workspace);
}
/** \internal */
template<typename Dest, typename Workspace>
inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const
{
const Index BlockSize = 48;
    // if there are enough reflectors (and dst has several columns), apply them by blocks
if(m_length>=BlockSize && dst.cols()>1)
{
for(Index i = 0; i < m_length; i+=BlockSize)
{
Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i;
Index k = m_trans ? i : (std::max)(Index(0),end-BlockSize);
Index bs = end-k;
Index start = k + m_shift;
typedef Block<typename internal::remove_all<VectorsType>::type,Dynamic,Dynamic> SubVectorsType;
SubVectorsType sub_vecs1(m_vectors.const_cast_derived(), Side==OnTheRight ? k : start,
Side==OnTheRight ? start : k,
Side==OnTheRight ? bs : m_vectors.rows()-start,
Side==OnTheRight ? m_vectors.cols()-start : bs);
typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1);
Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols());
apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans);
}
}
else
{
workspace.resize(dst.cols());
for(Index k = 0; k < m_length; ++k)
{
Index actual_k = m_trans ? k : m_length-k-1;
dst.bottomRows(rows()-m_shift-actual_k)
.applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
}
}
}
/** \brief Computes the product of a Householder sequence with a matrix.
* \param[in] other %Matrix being multiplied.
* \returns Expression object representing the product.
*
* This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this
* and \f$ M \f$ is the matrix \p other.
*/
template<typename OtherDerived>
typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other) const
{
typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type
res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());
applyThisOnTheLeft(res);
return res;
}
template<typename _VectorsType, typename _CoeffsType, int _Side> friend struct internal::hseq_side_dependent_impl;
/** \brief Sets the length of the Householder sequence.
* \param [in] length New value for the length.
*
* By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set
* to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that
* is smaller. After this function is called, the length equals \p length.
*
* \sa length()
*/
HouseholderSequence& setLength(Index length)
{
m_length = length;
return *this;
}
/** \brief Sets the shift of the Householder sequence.
* \param [in] shift New value for the shift.
*
* By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th
* column of the matrix \p v passed to the constructor corresponds to the i-th Householder
* reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}}
* H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th
* Householder reflection.
*
* \sa shift()
*/
HouseholderSequence& setShift(Index shift)
{
m_shift = shift;
return *this;
}
Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */
Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */
/* Necessary for .adjoint() and .conjugate() */
template <typename VectorsType2, typename CoeffsType2, int Side2> friend class HouseholderSequence;
protected:
/** \brief Sets the transpose flag.
* \param [in] trans New value of the transpose flag.
*
* By default, the transpose flag is not set. If the transpose flag is set, then this object represents
* \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
*
* \sa trans()
*/
HouseholderSequence& setTrans(bool trans)
{
m_trans = trans;
return *this;
}
bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */
typename VectorsType::Nested m_vectors;
typename CoeffsType::Nested m_coeffs;
bool m_trans;
Index m_length;
Index m_shift;
};
/** \brief Computes the product of a matrix with a Householder sequence.
* \param[in] other %Matrix being multiplied.
* \param[in] h %HouseholderSequence being multiplied.
* \returns Expression object representing the product.
*
* This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the
* Householder sequence represented by \p h.
*/
template<typename OtherDerived, typename VectorsType, typename CoeffsType, int Side>
typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other, const HouseholderSequence<VectorsType,CoeffsType,Side>& h)
{
typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type
res(other.template cast<typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::ResultScalar>());
h.applyThisOnTheRight(res);
return res;
}
/** \ingroup Householder_Module \householder_module
* \brief Convenience function for constructing a Householder sequence.
* \returns A HouseholderSequence constructed from the specified arguments.
*/
template<typename VectorsType, typename CoeffsType>
HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h)
{
return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h);
}
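/* Hedged example (assumes a real-valued matrix; a complex one would need
   qr.hCoeffs().conjugate()): HouseholderQR stores the essential vectors below
   the diagonal of matrixQR() and the coefficients in hCoeffs(), so its Q factor
   can be rebuilt by hand:

     MatrixXd A = MatrixXd::Random(6,4);
     HouseholderQR<MatrixXd> qr(A);
     MatrixXd Q = householderSequence(qr.matrixQR(), qr.hCoeffs()); // dense 6x6 Q
     // Q agrees with MatrixXd(qr.householderQ()) up to rounding.
*/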
/** \ingroup Householder_Module \householder_module
* \brief Convenience function for constructing a Householder sequence.
* \returns A HouseholderSequence constructed from the specified arguments.
 * \details This function differs from householderSequence() in that the template argument \p Side of
* the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft.
*/
template<typename VectorsType, typename CoeffsType>
HouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h)
{
return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h);
}
} // end namespace Eigen
#endif // EIGEN_HOUSEHOLDER_SEQUENCE_H
| 20,603 | 42.745223 | 205 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BASIC_PRECONDITIONERS_H
#define EIGEN_BASIC_PRECONDITIONERS_H
namespace Eigen {
/** \ingroup IterativeLinearSolvers_Module
 * \brief A preconditioner based on the diagonal entries
 *
 * This class allows one to approximately solve for A.x = b problems assuming A is a diagonal matrix.
* In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:
\code
A.diagonal().asDiagonal() . x = b
\endcode
*
* \tparam _Scalar the type of the scalar.
*
* \implsparsesolverconcept
*
* This preconditioner is suitable for both selfadjoint and general problems.
* The diagonal entries are pre-inverted and stored into a dense vector.
*
* \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
*
* \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient
*/
template <typename _Scalar>
class DiagonalPreconditioner
{
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
public:
typedef typename Vector::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
DiagonalPreconditioner() : m_isInitialized(false) {}
template<typename MatType>
explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
{
compute(mat);
}
Index rows() const { return m_invdiag.size(); }
Index cols() const { return m_invdiag.size(); }
template<typename MatType>
DiagonalPreconditioner& analyzePattern(const MatType& )
{
return *this;
}
template<typename MatType>
DiagonalPreconditioner& factorize(const MatType& mat)
{
m_invdiag.resize(mat.cols());
for(int j=0; j<mat.outerSize(); ++j)
{
typename MatType::InnerIterator it(mat,j);
while(it && it.index()!=j) ++it;
if(it && it.index()==j && it.value()!=Scalar(0))
m_invdiag(j) = Scalar(1)/it.value();
else
m_invdiag(j) = Scalar(1);
}
m_isInitialized = true;
return *this;
}
template<typename MatType>
DiagonalPreconditioner& compute(const MatType& mat)
{
return factorize(mat);
}
/** \internal */
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_invdiag.array() * b.array() ;
}
template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
eigen_assert(m_invdiag.size()==b.rows()
&& "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived());
}
ComputationInfo info() { return Success; }
protected:
Vector m_invdiag;
bool m_isInitialized;
};
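/* Minimal usage sketch (assumes a filled SparseMatrix<double> A and a matching
   VectorXd b): DiagonalPreconditioner is the default third template argument of
   ConjugateGradient, so spelling it out below is redundant but shows where the
   preconditioner plugs in.

     ConjugateGradient<SparseMatrix<double>, Lower|Upper,
                       DiagonalPreconditioner<double> > cg;
     cg.compute(A);
     VectorXd x = cg.solve(b);
*/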
/** \ingroup IterativeLinearSolvers_Module
* \brief Jacobi preconditioner for LeastSquaresConjugateGradient
*
 * This class allows one to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix.
* In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:
\code
(A.adjoint() * A).diagonal().asDiagonal() * x = b
\endcode
*
* \tparam _Scalar the type of the scalar.
*
* \implsparsesolverconcept
*
* The diagonal entries are pre-inverted and stored into a dense vector.
*
* \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner
*/
template <typename _Scalar>
class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
{
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DiagonalPreconditioner<_Scalar> Base;
using Base::m_invdiag;
public:
LeastSquareDiagonalPreconditioner() : Base() {}
template<typename MatType>
explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base()
{
compute(mat);
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& )
{
return *this;
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& factorize(const MatType& mat)
{
// Compute the inverse squared-norm of each column of mat
m_invdiag.resize(mat.cols());
if(MatType::IsRowMajor)
{
m_invdiag.setZero();
for(Index j=0; j<mat.outerSize(); ++j)
{
for(typename MatType::InnerIterator it(mat,j); it; ++it)
m_invdiag(it.index()) += numext::abs2(it.value());
}
for(Index j=0; j<mat.cols(); ++j)
if(numext::real(m_invdiag(j))>RealScalar(0))
m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j));
}
else
{
for(Index j=0; j<mat.outerSize(); ++j)
{
RealScalar sum = mat.innerVector(j).squaredNorm();
if(sum>RealScalar(0))
m_invdiag(j) = RealScalar(1)/sum;
else
m_invdiag(j) = RealScalar(1);
}
}
Base::m_isInitialized = true;
return *this;
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& compute(const MatType& mat)
{
return factorize(mat);
}
ComputationInfo info() { return Success; }
protected:
};
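/* Usage sketch (assumes a possibly rectangular, filled SparseMatrix<double> A
   and a matching VectorXd b): this preconditioner is the default of
   LeastSquaresConjugateGradient, which solves the normal equations
   A' A x = A' b without explicitly forming A' A.

     LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;
     lscg.compute(A);
     VectorXd x = lscg.solve(b);
*/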
/** \ingroup IterativeLinearSolvers_Module
* \brief A naive preconditioner which approximates any matrix as the identity matrix
*
* \implsparsesolverconcept
*
* \sa class DiagonalPreconditioner
*/
class IdentityPreconditioner
{
public:
IdentityPreconditioner() {}
template<typename MatrixType>
explicit IdentityPreconditioner(const MatrixType& ) {}
template<typename MatrixType>
IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
template<typename MatrixType>
IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
template<typename MatrixType>
IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
template<typename Rhs>
inline const Rhs& solve(const Rhs& b) const { return b; }
ComputationInfo info() { return Success; }
};
} // end namespace Eigen
#endif // EIGEN_BASIC_PRECONDITIONERS_H
| 6,763 | 28.797357 | 111 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BICGSTAB_H
#define EIGEN_BICGSTAB_H
namespace Eigen {
namespace internal {
/** \internal Low-level bi conjugate gradient stabilized algorithm
* \param mat The matrix A
* \param rhs The right hand side vector b
  * \param x On input an initial guess of the solution, on output the computed solution.
  * \param precond A preconditioner being able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the maximum number of iterations, on output the number of performed iterations.
  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
  * \return false in the case of a numerical issue, for example a breakdown of BiCGSTAB.
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
using std::abs;
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
Index maxIters = iters;
Index n = mat.cols();
VectorType r = rhs - mat * x;
VectorType r0 = r;
RealScalar r0_sqnorm = r0.squaredNorm();
RealScalar rhs_sqnorm = rhs.squaredNorm();
if(rhs_sqnorm == 0)
{
x.setZero();
return true;
}
Scalar rho = 1;
Scalar alpha = 1;
Scalar w = 1;
VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
VectorType y(n), z(n);
VectorType kt(n), ks(n);
VectorType s(n), t(n);
RealScalar tol2 = tol*tol*rhs_sqnorm;
RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
Index i = 0;
Index restarts = 0;
while ( r.squaredNorm() > tol2 && i<maxIters )
{
Scalar rho_old = rho;
rho = r0.dot(r);
if (abs(rho) < eps2*r0_sqnorm)
{
// The new residual vector became too orthogonal to the arbitrarily chosen direction r0
// Let's restart with a new r0:
r = rhs - mat * x;
r0 = r;
rho = r0_sqnorm = r.squaredNorm();
if(restarts++ == 0)
i = 0;
}
Scalar beta = (rho/rho_old) * (alpha / w);
p = r + beta * (p - w * v);
y = precond.solve(p);
v.noalias() = mat * y;
alpha = rho / r0.dot(v);
s = r - alpha * v;
z = precond.solve(s);
t.noalias() = mat * z;
RealScalar tmp = t.squaredNorm();
if(tmp>RealScalar(0))
w = t.dot(s) / tmp;
else
w = Scalar(0);
x += alpha * y + w * z;
r = s - w * t;
++i;
}
tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
iters = i;
return true;
}
}
template< typename _MatrixType,
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class BiCGSTAB;
namespace internal {
template< typename _MatrixType, typename _Preconditioner>
struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A bi conjugate gradient stabilized solver for sparse square problems
*
  * This class allows one to solve for A.x = b sparse linear problems using a bi conjugate gradient
* stabilized algorithm. The vectors x and b can be either dense or sparse.
*
  * \tparam _MatrixType the type of the matrix A, which can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* \implsparsesolverconcept
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
*
  * \b Performance: when using sparse matrices, best performance is achieved for a row-major sparse matrix format.
* Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
* See \ref TopicMultiThreading for details.
*
  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
* \include BiCGSTAB_simple.cpp
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<BiCGSTAB> Base;
using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
public:
/** Default constructor. */
BiCGSTAB() : Base() {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~BiCGSTAB() {}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
failed = true;
}
m_info = failed ? NumericalIssue
: m_error <= Base::m_tolerance ? Success
: NoConvergence;
m_isInitialized = true;
}
/** \internal */
using Base::_solve_impl;
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
{
x.resize(this->rows(),b.cols());
x.setZero();
_solve_with_guess_impl(b,x);
}
protected:
};
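/* Hedged stand-in for the BiCGSTAB_simple.cpp example referenced above
   (assumes <Eigen/Sparse> is included and A and b are filled by the caller):

     int n = 10000;
     VectorXd x(n), b(n);
     SparseMatrix<double> A(n,n);
     // fill A and b ...
     BiCGSTAB<SparseMatrix<double> > solver;
     solver.compute(A);
     x = solver.solve(b);
     std::cout << "#iterations:     " << solver.iterations() << std::endl;
     std::cout << "estimated error: " << solver.error()      << std::endl;
*/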
} // end namespace Eigen
#endif // EIGEN_BICGSTAB_H
| 7,251 | 30.668122 | 121 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONJUGATE_GRADIENT_H
#define EIGEN_CONJUGATE_GRADIENT_H
namespace Eigen {
namespace internal {
/** \internal Low-level conjugate gradient algorithm
* \param mat The matrix A
* \param rhs The right hand side vector b
  * \param x On input an initial guess of the solution, on output the computed solution.
  * \param precond A preconditioner being able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the maximum number of iterations, on output the number of performed iterations.
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
using std::abs;
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
Index maxIters = iters;
Index n = mat.cols();
VectorType residual = rhs - mat * x; //initial residual
RealScalar rhsNorm2 = rhs.squaredNorm();
if(rhsNorm2 == 0)
{
x.setZero();
iters = 0;
tol_error = 0;
return;
}
RealScalar threshold = tol*tol*rhsNorm2;
RealScalar residualNorm2 = residual.squaredNorm();
if (residualNorm2 < threshold)
{
iters = 0;
tol_error = sqrt(residualNorm2 / rhsNorm2);
return;
}
VectorType p(n);
p = precond.solve(residual); // initial search direction
VectorType z(n), tmp(n);
RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
Index i = 0;
while(i < maxIters)
{
tmp.noalias() = mat * p; // the bottleneck of the algorithm
Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
x += alpha * p; // update solution
residual -= alpha * tmp; // update residual
residualNorm2 = residual.squaredNorm();
if(residualNorm2 < threshold)
break;
z = precond.solve(residual); // approximately solve for "A z = residual"
RealScalar absOld = absNew;
absNew = numext::real(residual.dot(z)); // update the absolute value of r
RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
p = z + beta * p; // update search direction
i++;
}
tol_error = sqrt(residualNorm2 / rhsNorm2);
iters = i;
}
}
template< typename _MatrixType, int _UpLo=Lower,
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class ConjugateGradient;
namespace internal {
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems
*
  * This class allows one to solve for A.x = b linear problems using an iterative conjugate gradient algorithm.
* The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse.
*
  * \tparam _MatrixType the type of the matrix A, which can be a dense or a sparse matrix.
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
* \c Upper, or \c Lower|Upper in which the full matrix entries will be considered.
* Default is \c Lower, best performance is \c Lower|Upper.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* \implsparsesolverconcept
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
*
* \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is
* achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this
* case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
* See \ref TopicMultiThreading for details.
*
  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
\code
int n = 10000;
VectorXd x(n), b(n);
SparseMatrix<double> A(n,n);
// fill A and b
ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
cg.compute(A);
x = cg.solve(b);
std::cout << "#iterations: " << cg.iterations() << std::endl;
std::cout << "estimated error: " << cg.error() << std::endl;
// update b, and solve again
x = cg.solve(b);
\endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
*
* \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
typedef IterativeSolverBase<ConjugateGradient> Base;
using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
enum {
UpLo = _UpLo
};
public:
/** Default constructor. */
ConjugateGradient() : Base() {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~ConjugateGradient() {}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
typedef typename Base::MatrixWrapper MatrixWrapper;
typedef typename Base::ActualMatrixType ActualMatrixType;
enum {
TransposeInput = (!MatrixWrapper::MatrixFree)
&& (UpLo==(Lower|Upper))
&& (!MatrixType::IsRowMajor)
&& (!NumTraits<Scalar>::IsComplex)
};
typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
typedef typename internal::conditional<UpLo==(Lower|Upper),
RowMajorWrapper,
typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
>::type SelfAdjointWrapper;
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
RowMajorWrapper row_mat(matrix());
internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
}
/** \internal */
using Base::_solve_impl;
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
{
x.setZero();
_solve_with_guess_impl(b.derived(),x);
}
protected:
};
} // end namespace Eigen
#endif // EIGEN_CONJUGATE_GRADIENT_H
| 9,184 | 36.337398 | 164 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INCOMPLETE_CHOLESKY_H
#define EIGEN_INCOMPLETE_CHOLESKY_H
#include <vector>
#include <list>
namespace Eigen {
/**
* \brief Modified Incomplete Cholesky with dual threshold
*
* References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
* Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
*
* \tparam Scalar the scalar type of the input matrices
* \tparam _UpLo The triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
* \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<int>,
* unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering<int>.
*
* \implsparsesolverconcept
*
* It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$
* where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a
* fill-in reducing permutation as computed by the ordering method.
*
* \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out,
* and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed
  * on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta|) I \f$ where
* \f$ \sigma \f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$.
  * If the factorization fails, then the shift is doubled until it succeeds, up to a maximum of ten attempts. If it still fails, as reported by
  * the info() method, then you can either increase the initial shift, or better, use another preconditioning technique.
*
*/
template <typename Scalar, int _UpLo = Lower, typename _OrderingType =
#ifndef EIGEN_MPL2_ONLY
AMDOrdering<int>
#else
NaturalOrdering<int>
#endif
>
class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
{
protected:
typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;
using Base::m_isInitialized;
public:
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef _OrderingType OrderingType;
typedef typename OrderingType::PermutationType PermutationType;
typedef typename PermutationType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType;
typedef Matrix<Scalar,Dynamic,1> VectorSx;
typedef Matrix<RealScalar,Dynamic,1> VectorRx;
typedef Matrix<StorageIndex,Dynamic, 1> VectorIx;
typedef std::vector<std::list<StorageIndex> > VectorList;
enum { UpLo = _UpLo };
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
public:
    /** Default constructor leaving the object in a partly non-initialized state.
*
* You must call compute() or the pair analyzePattern()/factorize() to make it valid.
*
* \sa IncompleteCholesky(const MatrixType&)
*/
IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {}
/** Constructor computing the incomplete factorization for the given matrix \a matrix.
*/
template<typename MatrixType>
IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false)
{
compute(matrix);
}
/** \returns number of rows of the factored matrix */
Index rows() const { return m_L.rows(); }
/** \returns number of columns of the factored matrix */
Index cols() const { return m_L.cols(); }
/** \brief Reports whether previous computation was successful.
*
* It triggers an assertion if \c *this has not been initialized through the respective constructor,
* or a call to compute() or analyzePattern().
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
return m_info;
}
/** \brief Set the initial shift parameter \f$ \sigma \f$.
*/
void setInitialShift(RealScalar shift) { m_initialShift = shift; }
/** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat
*/
template<typename MatrixType>
void analyzePattern(const MatrixType& mat)
{
OrderingType ord;
PermutationType pinv;
ord(mat.template selfadjointView<UpLo>(), pinv);
if(pinv.size()>0) m_perm = pinv.inverse();
else m_perm.resize(0);
m_L.resize(mat.rows(), mat.cols());
m_analysisIsOk = true;
m_isInitialized = true;
m_info = Success;
}
/** \brief Performs the numerical factorization of the input matrix \a mat
*
* The method analyzePattern() or compute() must have been called beforehand
* with a matrix having the same pattern.
*
* \sa compute(), analyzePattern()
*/
template<typename MatrixType>
void factorize(const MatrixType& mat);
/** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat
*
* It is a shortcut for a sequential call to the analyzePattern() and factorize() methods.
*
* \sa analyzePattern(), factorize()
*/
template<typename MatrixType>
void compute(const MatrixType& mat)
{
analyzePattern(mat);
factorize(mat);
}
// internal
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
eigen_assert(m_factorizationIsOk && "factorize() should be called first");
if (m_perm.rows() == b.rows()) x = m_perm * b;
else x = b;
x = m_scale.asDiagonal() * x;
x = m_L.template triangularView<Lower>().solve(x);
x = m_L.adjoint().template triangularView<Upper>().solve(x);
x = m_scale.asDiagonal() * x;
if (m_perm.rows() == b.rows())
x = m_perm.inverse() * x;
}
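    // Taken together, the steps above apply x = P^{-1} S (L L^*)^{-1} S P b,
    // i.e. the approximate inverse implied by the factorization S P A P' S ~= L L'.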
/** \returns the sparse lower triangular factor L */
  const FactorType& matrixL() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_L; }
  /** \returns a vector representing the scaling factor S */
  const VectorRx& scalingS() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_scale; }
  /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */
  const PermutationType& permutationP() const { eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); return m_perm; }
protected:
FactorType m_L; // The lower part stored in CSC
VectorRx m_scale; // The vector for scaling the matrix
RealScalar m_initialShift; // The initial shift parameter
bool m_analysisIsOk;
bool m_factorizationIsOk;
ComputationInfo m_info;
PermutationType m_perm;
private:
inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol);
};
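/* Usage sketch (assumes a symmetric positive definite SparseMatrix<double> A
   and a matching VectorXd b): IncompleteCholesky is typically plugged into an
   iterative solver as a preconditioner rather than used stand-alone.

     ConjugateGradient<SparseMatrix<double>, Lower|Upper,
                       IncompleteCholesky<double> > icg;
     icg.compute(A);
     VectorXd x = icg.solve(b);
*/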
// Based on the following paper:
// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
// Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
// http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf
template<typename Scalar, int _UpLo, typename OrderingType>
template<typename _MatrixType>
void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
{
using std::sqrt;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
// Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
// Apply the fill-reducing permutation computed in analyzePattern()
if (m_perm.rows() == mat.rows() ) // To detect the null permutation
{
// The temporary is needed to make sure that the diagonal entry is properly sorted
FactorType tmp(mat.rows(), mat.cols());
tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
m_L.template selfadjointView<Lower>() = tmp.template selfadjointView<Lower>();
}
else
{
m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
}
Index n = m_L.cols();
Index nnz = m_L.nonZeros();
Map<VectorSx> vals(m_L.valuePtr(), nnz); //values
Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices
  Map<VectorIx> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each column
VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization
VectorList listCol(n); // listCol(j) is a linked list of columns to update column j
  VectorSx col_vals(n); // Stores the nonzero values of the current column
VectorIx col_irow(n); // Row indices of nonzero elements in each column
VectorIx col_pattern(n);
col_pattern.fill(-1);
StorageIndex col_nnz;
// Computes the scaling factors
m_scale.resize(n);
m_scale.setZero();
for (Index j = 0; j < n; j++)
for (Index k = colPtr[j]; k < colPtr[j+1]; k++)
{
m_scale(j) += numext::abs2(vals(k));
if(rowIdx[k]!=j)
m_scale(rowIdx[k]) += numext::abs2(vals(k));
}
m_scale = m_scale.cwiseSqrt().cwiseSqrt();
for (Index j = 0; j < n; ++j)
if(m_scale(j)>(std::numeric_limits<RealScalar>::min)())
m_scale(j) = RealScalar(1)/m_scale(j);
else
m_scale(j) = 1;
// TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster)
// Scale and compute the shift for the matrix
RealScalar mindiag = NumTraits<RealScalar>::highest();
for (Index j = 0; j < n; j++)
{
for (Index k = colPtr[j]; k < colPtr[j+1]; k++)
vals[k] *= (m_scale(j)*m_scale(rowIdx[k]));
eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored");
mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag);
}
FactorType L_save = m_L;
RealScalar shift = 0;
if(mindiag <= RealScalar(0.))
shift = m_initialShift - mindiag;
m_info = NumericalIssue;
// Try to perform the incomplete factorization using the current shift
int iter = 0;
do
{
// Apply the shift to the diagonal elements of the matrix
for (Index j = 0; j < n; j++)
vals[colPtr[j]] += shift;
// jki version of the Cholesky factorization
Index j=0;
for (; j < n; ++j)
{
// Left-looking factorization of the j-th column
// First, load the j-th column into col_vals
Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored
col_nnz = 0;
for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++)
{
StorageIndex l = rowIdx[i];
col_vals(col_nnz) = vals[i];
col_irow(col_nnz) = l;
col_pattern(l) = col_nnz;
col_nnz++;
}
{
typename std::list<StorageIndex>::iterator k;
// Browse all previous columns that will update column j
for(k = listCol[j].begin(); k != listCol[j].end(); k++)
{
Index jk = firstElt(*k); // First element to use in the column
eigen_internal_assert(rowIdx[jk]==j);
Scalar v_j_jk = numext::conj(vals[jk]);
jk += 1;
for (Index i = jk; i < colPtr[*k+1]; i++)
{
StorageIndex l = rowIdx[i];
if(col_pattern[l]<0)
{
col_vals(col_nnz) = vals[i] * v_j_jk;
col_irow[col_nnz] = l;
col_pattern(l) = col_nnz;
col_nnz++;
}
else
col_vals(col_pattern[l]) -= vals[i] * v_j_jk;
}
updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
}
}
// Scale the current column
if(numext::real(diag) <= 0)
{
if(++iter>=10)
return;
// increase shift
shift = numext::maxi(m_initialShift,RealScalar(2)*shift);
// restore m_L, col_pattern, and listCol
vals = Map<const VectorSx>(L_save.valuePtr(), nnz);
rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz);
colPtr = Map<const VectorIx>(L_save.outerIndexPtr(), n+1);
col_pattern.fill(-1);
for(Index i=0; i<n; ++i)
listCol[i].clear();
break;
}
RealScalar rdiag = sqrt(numext::real(diag));
vals[colPtr[j]] = rdiag;
for (Index k = 0; k<col_nnz; ++k)
{
Index i = col_irow[k];
//Scale
col_vals(k) /= rdiag;
//Update the remaining diagonals with col_vals
vals[colPtr[i]] -= numext::abs2(col_vals(k));
}
// Select the largest p elements
// p is the original number of elements in the column (without the diagonal)
Index p = colPtr[j+1] - colPtr[j] - 1 ;
Ref<VectorSx> cvals = col_vals.head(col_nnz);
Ref<VectorIx> cirow = col_irow.head(col_nnz);
internal::QuickSplit(cvals,cirow, p);
// Insert the largest p elements in the matrix
Index cpt = 0;
for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++)
{
vals[i] = col_vals(cpt);
rowIdx[i] = col_irow(cpt);
// restore col_pattern:
col_pattern(col_irow(cpt)) = -1;
cpt++;
}
      // Move the smallest remaining row index right after the diagonal element
Index jk = colPtr(j)+1;
updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);
}
if(j==n)
{
m_factorizationIsOk = true;
m_info = Success;
}
} while(m_info!=Success);
}
template<typename Scalar, int _UpLo, typename OrderingType>
inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol)
{
if (jk < colPtr(col+1) )
{
Index p = colPtr(col+1) - jk;
Index minpos;
rowIdx.segment(jk,p).minCoeff(&minpos);
minpos += jk;
if (rowIdx(minpos) != rowIdx(jk))
{
//Swap
std::swap(rowIdx(jk),rowIdx(minpos));
std::swap(vals(jk),vals(minpos));
}
firstElt(col) = internal::convert_index<StorageIndex,Index>(jk);
listCol[rowIdx(jk)].push_back(internal::convert_index<StorageIndex,Index>(col));
}
}
} // end namespace Eigen
#endif
| 15,058 | 36.553616 | 216 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INCOMPLETE_LUT_H
#define EIGEN_INCOMPLETE_LUT_H
namespace Eigen {
namespace internal {
/** \internal
* Compute a quick-sort split of a vector
* On output, the vector row is permuted such that its elements satisfy
* abs(row(i)) >= abs(row(ncut)) if i<ncut
* abs(row(i)) <= abs(row(ncut)) if i>ncut
* \param row The vector of values
  * \param ind The array of indices for the elements in @p row
* \param ncut The number of largest elements to keep
**/
template <typename VectorV, typename VectorI>
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
{
typedef typename VectorV::RealScalar RealScalar;
using std::swap;
using std::abs;
Index mid;
Index n = row.size(); /* length of the vector */
Index first, last ;
ncut--; /* to fit the zero-based indices */
first = 0;
last = n-1;
if (ncut < first || ncut > last ) return 0;
do {
mid = first;
RealScalar abskey = abs(row(mid));
for (Index j = first + 1; j <= last; j++) {
if ( abs(row(j)) > abskey) {
++mid;
swap(row(mid), row(j));
swap(ind(mid), ind(j));
}
}
/* Interchange for the pivot element */
swap(row(mid), row(first));
swap(ind(mid), ind(first));
if (mid > ncut) last = mid - 1;
else if (mid < ncut ) first = mid + 1;
} while (mid != ncut );
return 0; /* mid is equal to ncut */
}
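// Illustrative usage sketch (added; not part of the original source):
// VectorXd v(5); v << 1, 5, 3, 9, 2;
// VectorXi idx(5); idx << 0, 1, 2, 3, 4;
// internal::QuickSplit(v, idx, 2);
// // now |v(0)| and |v(1)| hold the two largest magnitudes (9 and 5, in
// // unspecified order) and idx has been permuted accordingly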
} // end namespace internal
/** \ingroup IterativeLinearSolvers_Module
* \class IncompleteLUT
* \brief Incomplete LU factorization with dual-threshold strategy
*
* \implsparsesolverconcept
*
* During the numerical factorization, two dropping rules are used:
* 1) any element whose magnitude is less than some tolerance is dropped.
* This tolerance is obtained by multiplying the input tolerance @p droptol
* by the average magnitude of all the original elements in the current row.
* 2) After the elimination of the row, only the @p fill largest elements in
* the L part and the @p fill largest elements in the U part are kept
* (in addition to the diagonal element). Note that @p fill is computed from
* the input parameter @p fillfactor, which is used as a ratio to control the fill-in
* relative to the initial number of nonzero elements.
*
* The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
* and when @p fill=n/2 with @p droptol different from zero.
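*
* A typical usage sketch (illustrative; assumes \c A and \c b are filled elsewhere):
* \code
* SparseMatrix<double> A; VectorXd b;
* BiCGSTAB<SparseMatrix<double>, IncompleteLUT<double> > solver;
* solver.preconditioner().setDroptol(1e-5);
* solver.preconditioner().setFillfactor(7);
* solver.compute(A);
* VectorXd x = solver.solve(b);
* \endcode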
*
* References: Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
* Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
*
* NOTE : The following implementation is derived from the ILUT implementation
* in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
* released under the terms of the GNU LGPL:
* http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
* However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
* http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
* alternatively, on GMANE:
* http://comments.gmane.org/gmane.comp.lib.eigen/3302
*/
template <typename _Scalar, typename _StorageIndex = int>
class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> >
{
protected:
typedef SparseSolverBase<IncompleteLUT> Base;
using Base::m_isInitialized;
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType;
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
public:
IncompleteLUT()
: m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
m_analysisIsOk(false), m_factorizationIsOk(false)
{}
template<typename MatrixType>
explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
: m_droptol(droptol),m_fillfactor(fillfactor),
m_analysisIsOk(false),m_factorizationIsOk(false)
{
eigen_assert(fillfactor != 0);
compute(mat);
}
Index rows() const { return m_lu.rows(); }
Index cols() const { return m_lu.cols(); }
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
return m_info;
}
template<typename MatrixType>
void analyzePattern(const MatrixType& amat);
template<typename MatrixType>
void factorize(const MatrixType& amat);
/**
* Computes an incomplete LU factorization with dual threshold on the matrix \a amat.
* No pivoting is done in this version.
*
**/
template<typename MatrixType>
IncompleteLUT& compute(const MatrixType& amat)
{
analyzePattern(amat);
factorize(amat);
return *this;
}
void setDroptol(const RealScalar& droptol);
void setFillfactor(int fillfactor);
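/** \internal (Note added for clarity.) Applies the incomplete factors:
* x = P * (U \ (L \ (P^{-1} * b))), matching the permutation and the two
* triangular solves performed below. */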
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_Pinv * b;
x = m_lu.template triangularView<UnitLower>().solve(x);
x = m_lu.template triangularView<Upper>().solve(x);
x = m_P * x;
}
protected:
/** keeps off-diagonal entries; drops diagonal entries */
struct keep_diag {
inline bool operator() (const Index& row, const Index& col, const Scalar&) const
{
return row!=col;
}
};
protected:
FactorType m_lu;
RealScalar m_droptol;
int m_fillfactor;
bool m_analysisIsOk;
bool m_factorizationIsOk;
ComputationInfo m_info;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation
};
/**
* Set control parameter droptol
* \param droptol Drop any element whose magnitude is less than this tolerance
**/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol)
{
this->m_droptol = droptol;
}
/**
* Set control parameter fillfactor
* \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
**/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor)
{
this->m_fillfactor = fillfactor;
}
template <typename Scalar, typename StorageIndex>
template<typename _MatrixType>
void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
// Since ILUT does not perform any numerical pivoting,
// it is highly preferable to keep the diagonal through symmetric permutations.
#ifndef EIGEN_MPL2_ONLY
// To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
// On the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
m_Pinv = m_P.inverse(); // cache the inverse permutation
#else
// If AMD is not available (MPL2-only), then let's use the slower COLAMD routine.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
COLAMDOrdering<StorageIndex> ordering;
ordering(mat1,m_Pinv);
m_P = m_Pinv.inverse();
#endif
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = true;
}
template <typename Scalar, typename StorageIndex>
template<typename _MatrixType>
void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat)
{
using std::sqrt;
using std::swap;
using std::abs;
using internal::convert_index;
eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
Index n = amat.cols(); // Size of the matrix
m_lu.resize(n,n);
// Declare Working vectors and variables
Vector u(n); // real values of the row -- maximum size is n --
VectorI ju(n); // column position of the values in u -- maximum size is n
VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
// Apply the fill-reducing permutation
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
SparseMatrix<Scalar,RowMajor, StorageIndex> mat;
mat = amat.twistedBy(m_Pinv);
// Initialization
jr.fill(-1);
ju.fill(0);
u.fill(0);
// number of largest elements to keep in each row:
Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;
if (fill_in > n) fill_in = n;
// number of largest nonzero elements to keep in the L and the U part of the current row:
Index nnzL = fill_in/2;
Index nnzU = nnzL;
m_lu.reserve(n * (nnzL + nnzU + 1));
// global loop over the rows of the sparse matrix
for (Index ii = 0; ii < n; ii++)
{
// 1 - copy the lower and the upper part of the row i of mat in the working vector u
Index sizeu = 1; // number of nonzero elements in the upper part of the current row
Index sizel = 0; // number of nonzero elements in the lower part of the current row
ju(ii) = convert_index<StorageIndex>(ii);
u(ii) = 0;
jr(ii) = convert_index<StorageIndex>(ii);
RealScalar rownorm = 0;
typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
for (; j_it; ++j_it)
{
Index k = j_it.index();
if (k < ii)
{
// copy the lower part
ju(sizel) = convert_index<StorageIndex>(k);
u(sizel) = j_it.value();
jr(k) = convert_index<StorageIndex>(sizel);
++sizel;
}
else if (k == ii)
{
u(ii) = j_it.value();
}
else
{
// copy the upper part
Index jpos = ii + sizeu;
ju(jpos) = convert_index<StorageIndex>(k);
u(jpos) = j_it.value();
jr(k) = convert_index<StorageIndex>(jpos);
++sizeu;
}
rownorm += numext::abs2(j_it.value());
}
// 2 - detect possible zero row
if(rownorm==0)
{
m_info = NumericalIssue;
return;
}
// Take the 2-norm of the current row as a relative tolerance
rownorm = sqrt(rownorm);
// 3 - eliminate the previous nonzero rows
Index jj = 0;
Index len = 0;
while (jj < sizel)
{
// In order to eliminate in the correct order,
// we must first select the smallest column index among ju(jj:sizel)
Index k;
Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
k += jj;
if (minrow != ju(jj))
{
// swap the two locations
Index j = ju(jj);
swap(ju(jj), ju(k));
jr(minrow) = convert_index<StorageIndex>(jj);
jr(j) = convert_index<StorageIndex>(k);
swap(u(jj), u(k));
}
// Reset this location
jr(minrow) = -1;
// Start elimination
typename FactorType::InnerIterator ki_it(m_lu, minrow);
while (ki_it && ki_it.index() < minrow) ++ki_it;
eigen_internal_assert(ki_it && ki_it.col()==minrow);
Scalar fact = u(jj) / ki_it.value();
// drop too small elements
if(abs(fact) <= m_droptol)
{
jj++;
continue;
}
// linear combination of the current row ii and the row minrow
++ki_it;
for (; ki_it; ++ki_it)
{
Scalar prod = fact * ki_it.value();
Index j = ki_it.index();
Index jpos = jr(j);
if (jpos == -1) // fill-in element
{
Index newpos;
if (j >= ii) // dealing with the upper part
{
newpos = ii + sizeu;
sizeu++;
eigen_internal_assert(sizeu<=n);
}
else // dealing with the lower part
{
newpos = sizel;
sizel++;
eigen_internal_assert(sizel<=ii);
}
ju(newpos) = convert_index<StorageIndex>(j);
u(newpos) = -prod;
jr(j) = convert_index<StorageIndex>(newpos);
}
else
u(jpos) -= prod;
}
// store the pivot element
u(len) = fact;
ju(len) = convert_index<StorageIndex>(minrow);
++len;
jj++;
} // end of the elimination on the row ii
// reset the upper part of the pointer jr to zero
for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
// 4 - partially sort and insert the elements in the m_lu matrix
// sort the L-part of the row
sizel = len;
len = (std::min)(sizel, nnzL);
typename Vector::SegmentReturnType ul(u.segment(0, sizel));
typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));
internal::QuickSplit(ul, jul, len);
// store the largest m_fill elements of the L part
m_lu.startVec(ii);
for(Index k = 0; k < len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
// store the diagonal element
// apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
if (u(ii) == Scalar(0))
u(ii) = sqrt(m_droptol) * rownorm;
m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
// sort the U-part of the row
// apply the dropping rule first
len = 0;
for(Index k = 1; k < sizeu; k++)
{
if(abs(u(ii+k)) > m_droptol * rownorm )
{
++len;
u(ii + len) = u(ii + k);
ju(ii + len) = ju(ii + k);
}
}
sizeu = len + 1; // +1 to take into account the diagonal element
len = (std::min)(sizeu, nnzU);
typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
internal::QuickSplit(uu, juu, len);
// store the largest elements of the U part
for(Index k = ii + 1; k < ii + len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
}
m_lu.finalize();
m_lu.makeCompressed();
m_factorizationIsOk = true;
m_info = Success;
}
} // end namespace Eigen
#endif // EIGEN_INCOMPLETE_LUT_H
| 15,232 | 31.900648 | 134 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
#define EIGEN_ITERATIVE_SOLVER_BASE_H
namespace Eigen {
namespace internal {
template<typename MatrixType>
struct is_ref_compatible_impl
{
private:
template <typename T0>
struct any_conversion
{
template <typename T> any_conversion(const volatile T&);
template <typename T> any_conversion(T&);
};
struct yes {int a[1];};
struct no {int a[2];};
template<typename T>
static yes test(const Ref<const T>&, int);
template<typename T>
static no test(any_conversion<T>, ...);
public:
static MatrixType ms_from;
enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) };
};
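// (Descriptive note added for clarity.) This is sizeof-based SFINAE: when
// MatrixType is convertible to Ref<const MatrixType>, the first `test`
// overload (taking int) is selected and returns `yes`; otherwise the
// variadic catch-all returns `no`.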
template<typename MatrixType>
struct is_ref_compatible
{
enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value };
};
template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value>
class generic_matrix_wrapper;
// We have an explicit matrix at hand, compatible with Ref<>
template<typename MatrixType>
class generic_matrix_wrapper<MatrixType,false>
{
public:
typedef Ref<const MatrixType> ActualMatrixType;
template<int UpLo> struct ConstSelfAdjointViewReturnType {
typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type;
};
enum {
MatrixFree = false
};
generic_matrix_wrapper()
: m_dummy(0,0), m_matrix(m_dummy)
{}
template<typename InputType>
generic_matrix_wrapper(const InputType &mat)
: m_matrix(mat)
{}
const ActualMatrixType& matrix() const
{
return m_matrix;
}
template<typename MatrixDerived>
void grab(const EigenBase<MatrixDerived> &mat)
{
m_matrix.~Ref<const MatrixType>();
::new (&m_matrix) Ref<const MatrixType>(mat.derived());
}
void grab(const Ref<const MatrixType> &mat)
{
if(&(mat.derived()) != &m_matrix)
{
m_matrix.~Ref<const MatrixType>();
::new (&m_matrix) Ref<const MatrixType>(mat);
}
}
protected:
MatrixType m_dummy; // used to default initialize the Ref<> object
ActualMatrixType m_matrix;
};
// MatrixType is not compatible with Ref<> -> matrix-free wrapper
template<typename MatrixType>
class generic_matrix_wrapper<MatrixType,true>
{
public:
typedef MatrixType ActualMatrixType;
template<int UpLo> struct ConstSelfAdjointViewReturnType
{
typedef ActualMatrixType Type;
};
enum {
MatrixFree = true
};
generic_matrix_wrapper()
: mp_matrix(0)
{}
generic_matrix_wrapper(const MatrixType &mat)
: mp_matrix(&mat)
{}
const ActualMatrixType& matrix() const
{
return *mp_matrix;
}
void grab(const MatrixType &mat)
{
mp_matrix = &mat;
}
protected:
const ActualMatrixType *mp_matrix;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief Base class for linear iterative solvers
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
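*
* A minimal configuration sketch (illustrative; shown with one of the derived solvers):
* \code
* ConjugateGradient<SparseMatrix<double> > solver;
* solver.setMaxIterations(100).setTolerance(1e-8); // both setters return Derived&
* \endcode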
*/
template< typename Derived>
class IterativeSolverBase : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::m_isInitialized;
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
using Base::derived;
/** Default constructor. */
IterativeSolverBase()
{
init();
}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A)
: m_matrixWrapper(A.derived())
{
init();
compute(matrix());
}
~IterativeSolverBase() {}
/** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix vector products.
*/
template<typename MatrixDerived>
Derived& analyzePattern(const EigenBase<MatrixDerived>& A)
{
grab(A.derived());
m_preconditioner.analyzePattern(matrix());
m_isInitialized = true;
m_analysisIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls factorize on the preconditioner.
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
Derived& factorize(const EigenBase<MatrixDerived>& A)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
grab(A.derived());
m_preconditioner.factorize(matrix());
m_factorizationIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly initializes/computes the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix vector products.
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
Derived& compute(const EigenBase<MatrixDerived>& A)
{
grab(A.derived());
m_preconditioner.compute(matrix());
m_isInitialized = true;
m_analysisIsOk = true;
m_factorizationIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** \internal */
Index rows() const { return matrix().rows(); }
/** \internal */
Index cols() const { return matrix().cols(); }
/** \returns the tolerance threshold used by the stopping criteria.
* \sa setTolerance()
*/
RealScalar tolerance() const { return m_tolerance; }
/** Sets the tolerance threshold used by the stopping criteria.
*
* This value is used as an upper bound to the relative residual error: |Ax-b|/|b|.
* The default value is the machine precision given by NumTraits<Scalar>::epsilon()
*/
Derived& setTolerance(const RealScalar& tolerance)
{
m_tolerance = tolerance;
return derived();
}
/** \returns a read-write reference to the preconditioner for custom configuration. */
Preconditioner& preconditioner() { return m_preconditioner; }
/** \returns a read-only reference to the preconditioner. */
const Preconditioner& preconditioner() const { return m_preconditioner; }
/** \returns the max number of iterations.
* It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
Index maxIterations() const
{
return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;
}
/** Sets the max number of iterations.
* Default is twice the number of columns of the matrix.
*/
Derived& setMaxIterations(Index maxIters)
{
m_maxIterations = maxIters;
return derived();
}
/** \returns the number of iterations performed during the last solve */
Index iterations() const
{
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
return m_iterations;
}
/** \returns the tolerance error reached during the last solve.
* It is a close approximation of the true relative residual error |Ax-b|/|b|.
*/
RealScalar error() const
{
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
return m_error;
}
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
* and \a x0 as an initial solution.
*
* \sa solve(), compute()
*/
template<typename Rhs,typename Guess>
inline const SolveWithGuess<Derived, Rhs, Guess>
solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0);
}
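// Illustrative warm-start sketch (added; not part of the original source),
// assuming `solver` has already been computed on A and x0 is a previous solution:
// VectorXd x = solver.solveWithGuess(b, x0);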
/** \returns Success if the iterations converged, and NoConvergence otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
return m_info;
}
/** \internal */
template<typename Rhs, typename DestDerived>
void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
{
eigen_assert(rows()==b.rows());
Index rhsCols = b.cols();
Index size = b.rows();
DestDerived& dest(aDest.derived());
typedef typename DestDerived::Scalar DestScalar;
Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
Eigen::Matrix<DestScalar,Dynamic,1> tx(cols());
// We do not directly fill dest because sparse expressions have to be free of aliasing issues.
// For non-square least-squares problems, b and dest might not have the same size whereas they might alias each other.
typename DestDerived::PlainObject tmp(cols(),rhsCols);
for(Index k=0; k<rhsCols; ++k)
{
tb = b.col(k);
tx = derived().solve(tb);
tmp.col(k) = tx.sparseView(0);
}
dest.swap(tmp);
}
protected:
void init()
{
m_isInitialized = false;
m_analysisIsOk = false;
m_factorizationIsOk = false;
m_maxIterations = -1;
m_tolerance = NumTraits<Scalar>::epsilon();
}
typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper;
typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType;
const ActualMatrixType& matrix() const
{
return m_matrixWrapper.matrix();
}
template<typename InputType>
void grab(const InputType &A)
{
m_matrixWrapper.grab(A);
}
MatrixWrapper m_matrixWrapper;
Preconditioner m_preconditioner;
Index m_maxIterations;
RealScalar m_tolerance;
mutable RealScalar m_error;
mutable Index m_iterations;
mutable ComputationInfo m_info;
mutable bool m_analysisIsOk, m_factorizationIsOk;
};
} // end namespace Eigen
#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
| 11,527 | 28.18481 | 121 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
namespace Eigen {
namespace internal {
/** \internal Low-level conjugate gradient algorithm for least-square problems
* \param mat The matrix A
* \param rhs The right hand side vector b
* \param x On input an initial solution, on output the computed solution.
* \param precond A preconditioner being able to efficiently solve for an
* approximation of A'Ax=b (regardless of b)
* \param iters On input the max number of iterations, on output the number of performed iterations.
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
using std::abs;
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
Index maxIters = iters;
Index m = mat.rows(), n = mat.cols();
VectorType residual = rhs - mat * x;
VectorType normal_residual = mat.adjoint() * residual;
RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
if(rhsNorm2 == 0)
{
x.setZero();
iters = 0;
tol_error = 0;
return;
}
RealScalar threshold = tol*tol*rhsNorm2;
RealScalar residualNorm2 = normal_residual.squaredNorm();
if (residualNorm2 < threshold)
{
iters = 0;
tol_error = sqrt(residualNorm2 / rhsNorm2);
return;
}
VectorType p(n);
p = precond.solve(normal_residual); // initial search direction
VectorType z(n), tmp(m);
RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM
Index i = 0;
while(i < maxIters)
{
tmp.noalias() = mat * p;
Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir
x += alpha * p; // update solution
residual -= alpha * tmp; // update residual
normal_residual = mat.adjoint() * residual; // update residual of the normal equation
residualNorm2 = normal_residual.squaredNorm();
if(residualNorm2 < threshold)
break;
z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual"
RealScalar absOld = absNew;
absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r
RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
p = z + beta * p; // update search direction
i++;
}
tol_error = sqrt(residualNorm2 / rhsNorm2);
iters = i;
}
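// (Descriptive note added for clarity.) The loop above is conjugate gradient
// applied to the normal equations A'A x = A'b (CGNR): `tmp` holds A*p so that
// p'A'Ap is evaluated as tmp.squaredNorm(), and `precond` approximates the
// inverse of A'A.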
}
template< typename _MatrixType,
typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> >
class LeastSquaresConjugateGradient;
namespace internal {
template< typename _MatrixType, typename _Preconditioner>
struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A conjugate gradient solver for sparse (or dense) least-square problems
*
* This class allows solving A x = b linear problems using an iterative conjugate gradient algorithm.
* The matrix A can be non-symmetric and rectangular, but the matrix A' A should be positive-definite to guarantee stability.
* Otherwise, the SparseLU or SparseQR classes might be preferable.
* The matrix A and the vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner
*
* \implsparsesolverconcept
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are twice the number of columns of the matrix for the
* maximal number of iterations and NumTraits<Scalar>::epsilon() for the tolerance.
*
* This class can be used in the same way as the direct solver classes. Here is a typical usage example:
\code
int m=1000000, n = 10000;
VectorXd x(n), b(m);
SparseMatrix<double> A(m,n);
// fill A and b
LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;
lscg.compute(A);
x = lscg.solve(b);
std::cout << "#iterations: " << lscg.iterations() << std::endl;
std::cout << "estimated error: " << lscg.error() << std::endl;
// update b, and solve again
x = lscg.solve(b);
\endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* \sa class ConjugateGradient, SparseLU, SparseQR
*/
template< typename _MatrixType, typename _Preconditioner>
class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base;
using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
public:
/** Default constructor. */
LeastSquaresConjugateGradient() : Base() {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~LeastSquaresConjugateGradient() {}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
}
/** \internal */
using Base::_solve_impl;
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
{
x.setZero();
_solve_with_guess_impl(b.derived(),x);
}
};
} // end namespace Eigen
#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
| 7,762 | 34.774194 | 127 |
h
|
abess
|
abess-master/python/include/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SOLVEWITHGUESS_H
#define EIGEN_SOLVEWITHGUESS_H
namespace Eigen {
template<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;
/** \class SolveWithGuess
* \ingroup IterativeLinearSolvers_Module
*
* \brief Pseudo expression representing a solving operation
*
* \tparam Decomposition the type of the matrix or decomposition object
* \tparam RhsType the type of the right-hand side
*
* This class represents an expression of A.solve(B)
* and most of the time this is the only way it is used.
*
*/
namespace internal {
template<typename Decomposition, typename RhsType, typename GuessType>
struct traits<SolveWithGuess<Decomposition, RhsType, GuessType> >
: traits<Solve<Decomposition,RhsType> >
{};
}
template<typename Decomposition, typename RhsType, typename GuessType>
class SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type
{
public:
typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;
typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;
typedef typename internal::ref_selector<SolveWithGuess>::type Nested;
SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)
: m_dec(dec), m_rhs(rhs), m_guess(guess)
{}
EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }
EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; }
EIGEN_DEVICE_FUNC const GuessType& guess() const { return m_guess; }
protected:
const Decomposition &m_dec;
const RhsType &m_rhs;
const GuessType &m_guess;
private:
Scalar coeff(Index row, Index col) const;
Scalar coeff(Index i) const;
};
namespace internal {
// Evaluator of SolveWithGuess -> eval into a temporary
template<typename Decomposition, typename RhsType, typename GuessType>
struct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >
: public evaluator<typename SolveWithGuess<Decomposition,RhsType,GuessType>::PlainObject>
{
typedef SolveWithGuess<Decomposition,RhsType,GuessType> SolveType;
typedef typename SolveType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
evaluator(const SolveType& solve)
: m_result(solve.rows(), solve.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
m_result = solve.guess();
solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);
}
protected:
PlainObject m_result;
};
// Specialization for "dst = dec.solveWithGuess(rhs)"
// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>
struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
dst = src.guess();
src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SOLVEWITHGUESS_H
| 4,158 | 34.853448 | 171 |
h
|
abess
|
abess-master/python/include/Eigen/src/Jacobi/Jacobi.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_JACOBI_H
#define EIGEN_JACOBI_H
namespace Eigen {
/** \ingroup Jacobi_Module
* \jacobi_module
* \class JacobiRotation
* \brief Rotation given by a cosine-sine pair.
*
* This class represents a Jacobi or Givens rotation.
* This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by
* its cosine \c c and sine \c s as follows:
* \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s & \overline c \end{array} \right ) \f$
*
* You can apply the respective counter-clockwise rotation to a column vector \c v by
* applying its adjoint on the left: \f$ v = J^* v \f$, which translates to the following Eigen code:
* \code
* v.applyOnTheLeft(J.adjoint());
* \endcode
*
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar> class JacobiRotation
{
public:
typedef typename NumTraits<Scalar>::Real RealScalar;
/** Default constructor without any initialization. */
JacobiRotation() {}
/** Construct a planar rotation from a cosine-sine pair (\a c, \a s). */
JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
Scalar& c() { return m_c; }
Scalar c() const { return m_c; }
Scalar& s() { return m_s; }
Scalar s() const { return m_s; }
/** Concatenates two planar rotations */
JacobiRotation operator*(const JacobiRotation& other)
{
using numext::conj;
return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,
conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));
}
/** Returns the transposed transformation */
JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }
/** Returns the adjoint transformation */
JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
template<typename Derived>
bool makeJacobi(const MatrixBase<Derived>&, Index p, Index q);
bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
protected:
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
Scalar m_c, m_s;
};
/** Makes \c *this a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
* \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
*
* \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
{
using std::sqrt;
using std::abs;
typedef typename NumTraits<Scalar>::Real RealScalar;
RealScalar deno = RealScalar(2)*abs(y);
if(deno < (std::numeric_limits<RealScalar>::min)())
{
m_c = Scalar(1);
m_s = Scalar(0);
return false;
}
else
{
RealScalar tau = (x-z)/deno;
RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));
RealScalar t;
if(tau>RealScalar(0))
{
t = RealScalar(1) / (tau + w);
}
else
{
t = RealScalar(1) / (tau - w);
}
RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));
m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;
m_c = n;
return true;
}
}
/** Makes \c *this a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix
* \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields
* a diagonal matrix \f$ A = J^* B J \f$
*
* Example: \include Jacobi_makeJacobi.cpp
* Output: \verbinclude Jacobi_makeJacobi.out
*
* \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
template<typename Derived>
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q)
{
return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
}
/** Makes \c *this a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
* \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
* \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
*
* The value of \a z is returned if \a z is not null (the default is null).
* Also note that G is built such that the cosine is always real.
*
* Example: \include Jacobi_makeGivens.cpp
* Output: \verbinclude Jacobi_makeGivens.out
*
* This function implements the continuous Givens rotation generation algorithm
* found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.
* LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.
*
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
{
makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
}
// specialization for complexes
template<typename Scalar>
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
{
using std::sqrt;
using std::abs;
using numext::conj;
if(q==Scalar(0))
{
m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
m_s = 0;
if(r) *r = m_c * p;
}
else if(p==Scalar(0))
{
m_c = 0;
m_s = -q/abs(q);
if(r) *r = abs(q);
}
else
{
RealScalar p1 = numext::norm1(p);
RealScalar q1 = numext::norm1(q);
if(p1>=q1)
{
Scalar ps = p / p1;
RealScalar p2 = numext::abs2(ps);
Scalar qs = q / p1;
RealScalar q2 = numext::abs2(qs);
RealScalar u = sqrt(RealScalar(1) + q2/p2);
if(numext::real(p)<RealScalar(0))
u = -u;
m_c = Scalar(1)/u;
m_s = -qs*conj(ps)*(m_c/p2);
if(r) *r = p * u;
}
else
{
Scalar ps = p / q1;
RealScalar p2 = numext::abs2(ps);
Scalar qs = q / q1;
RealScalar q2 = numext::abs2(qs);
RealScalar u = q1 * sqrt(p2 + q2);
if(numext::real(p)<RealScalar(0))
u = -u;
p1 = abs(p);
ps = p/p1;
m_c = p1/u;
m_s = -conj(ps) * (q/u);
if(r) *r = ps * u;
}
}
}
// specialization for reals
template<typename Scalar>
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
{
using std::sqrt;
using std::abs;
if(q==Scalar(0))
{
m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
m_s = Scalar(0);
if(r) *r = abs(p);
}
else if(p==Scalar(0))
{
m_c = Scalar(0);
m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
if(r) *r = abs(q);
}
else if(abs(p) > abs(q))
{
Scalar t = q/p;
Scalar u = sqrt(Scalar(1) + numext::abs2(t));
if(p<Scalar(0))
u = -u;
m_c = Scalar(1)/u;
m_s = -t * m_c;
if(r) *r = p * u;
}
else
{
Scalar t = p/q;
Scalar u = sqrt(Scalar(1) + numext::abs2(t));
if(q<Scalar(0))
u = -u;
m_s = -Scalar(1)/u;
m_c = -t * m_s;
if(r) *r = q * u;
}
}
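// Worked example for the real path above (added for clarity): with p=3, q=4
// the last branch runs: t = 3/4, u = sqrt(1 + 9/16) = 5/4, hence
// m_s = -1/u = -4/5, m_c = -t*m_s = 3/5 and r = q*u = 5, so that
// G^* (3,4)^T = (5,0)^T as promised by the makeGivens() documentation.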
/****************************************************************************************
* Implementation of MatrixBase methods
****************************************************************************************/
namespace internal {
/** \jacobi_module
* Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
* \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right ) = J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$
*
* \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename VectorX, typename VectorY, typename OtherScalar>
void apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j);
}
/** \jacobi_module
* Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B,
* with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$.
*
* \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()
*/
template<typename Derived>
template<typename OtherScalar>
inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
RowXpr x(this->row(p));
RowXpr y(this->row(q));
internal::apply_rotation_in_the_plane(x, y, j);
}
/** \ingroup Jacobi_Module
* Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J
* with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$.
*
* \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()
*/
template<typename Derived>
template<typename OtherScalar>
inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
{
ColXpr x(this->col(p));
ColXpr y(this->col(q));
internal::apply_rotation_in_the_plane(x, y, j.transpose());
}
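// Illustrative sketch (added; not part of the original source): annihilating
// the (1,0) entry of a matrix m with a Givens rotation:
// JacobiRotation<double> G;
// G.makeGivens(m(0,0), m(1,0));
// m.applyOnTheLeft(0, 1, G.adjoint()); // m(1,0) is now zero (up to rounding)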
namespace internal {
template<typename VectorX, typename VectorY, typename OtherScalar>
void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j)
{
typedef typename VectorX::Scalar Scalar;
enum {
PacketSize = packet_traits<Scalar>::size,
OtherPacketSize = packet_traits<OtherScalar>::size
};
typedef typename packet_traits<Scalar>::type Packet;
typedef typename packet_traits<OtherScalar>::type OtherPacket;
eigen_assert(xpr_x.size() == xpr_y.size());
Index size = xpr_x.size();
Index incrx = xpr_x.derived().innerStride();
Index incry = xpr_y.derived().innerStride();
Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
OtherScalar c = j.c();
OtherScalar s = j.s();
if (c==OtherScalar(1) && s==OtherScalar(0))
return;
/*** dynamic-size vectorized paths ***/
if(VectorX::SizeAtCompileTime == Dynamic &&
(VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
(PacketSize == OtherPacketSize) &&
((incrx==1 && incry==1) || PacketSize == 1))
{
// both vectors are sequentially stored in memory => vectorization
enum { Peeling = 2 };
Index alignedStart = internal::first_default_aligned(y, size);
Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
const OtherPacket pc = pset1<OtherPacket>(c);
const OtherPacket ps = pset1<OtherPacket>(s);
conj_helper<OtherPacket,Packet,NumTraits<OtherScalar>::IsComplex,false> pcj;
conj_helper<OtherPacket,Packet,false,false> pm;
for(Index i=0; i<alignedStart; ++i)
{
Scalar xi = x[i];
Scalar yi = y[i];
x[i] = c * xi + numext::conj(s) * yi;
y[i] = -s * xi + numext::conj(c) * yi;
}
Scalar* EIGEN_RESTRICT px = x + alignedStart;
Scalar* EIGEN_RESTRICT py = y + alignedStart;
if(internal::first_default_aligned(x, size)==alignedStart)
{
for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
{
Packet xi = pload<Packet>(px);
Packet yi = pload<Packet>(py);
pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
px += PacketSize;
py += PacketSize;
}
}
else
{
Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
{
Packet xi = ploadu<Packet>(px);
Packet xi1 = ploadu<Packet>(px+PacketSize);
Packet yi = pload <Packet>(py);
Packet yi1 = pload <Packet>(py+PacketSize);
pstoreu(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
pstoreu(px+PacketSize, padd(pm.pmul(pc,xi1),pcj.pmul(ps,yi1)));
pstore (py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pm.pmul(ps,xi1)));
px += Peeling*PacketSize;
py += Peeling*PacketSize;
}
if(alignedEnd!=peelingEnd)
{
Packet xi = ploadu<Packet>(x+peelingEnd);
Packet yi = pload <Packet>(y+peelingEnd);
pstoreu(x+peelingEnd, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
}
}
for(Index i=alignedEnd; i<size; ++i)
{
Scalar xi = x[i];
Scalar yi = y[i];
x[i] = c * xi + numext::conj(s) * yi;
y[i] = -s * xi + numext::conj(c) * yi;
}
}
/*** fixed-size vectorized path ***/
else if(VectorX::SizeAtCompileTime != Dynamic &&
(VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
(PacketSize == OtherPacketSize) &&
(EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment)>0)) // FIXME should be compared to the required alignment
{
const OtherPacket pc = pset1<OtherPacket>(c);
const OtherPacket ps = pset1<OtherPacket>(s);
conj_helper<OtherPacket,Packet,NumTraits<OtherPacket>::IsComplex,false> pcj;
conj_helper<OtherPacket,Packet,false,false> pm;
Scalar* EIGEN_RESTRICT px = x;
Scalar* EIGEN_RESTRICT py = y;
for(Index i=0; i<size; i+=PacketSize)
{
Packet xi = pload<Packet>(px);
Packet yi = pload<Packet>(py);
pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi)));
pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi)));
px += PacketSize;
py += PacketSize;
}
}
/*** non-vectorized path ***/
else
{
for(Index i=0; i<size; ++i)
{
Scalar xi = *x;
Scalar yi = *y;
*x = c * xi + numext::conj(s) * yi;
*y = -s * xi + numext::conj(c) * yi;
x += incrx;
y += incry;
}
}
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_JACOBI_H
| 14,888 | 32.68552 | 151 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/Determinant.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DETERMINANT_H
#define EIGEN_DETERMINANT_H
namespace Eigen {
namespace internal {
template<typename Derived>
inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
return matrix.coeff(0,a)
* (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
template<typename Derived>
const typename Derived::Scalar bruteforce_det4_helper
(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
{
return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
* (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
}
template<typename Derived,
int DeterminantType = Derived::RowsAtCompileTime
> struct determinant_impl
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
return typename traits<Derived>::Scalar(1);
return m.partialPivLu().determinant();
}
};
template<typename Derived> struct determinant_impl<Derived, 1>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
};
template<typename Derived> struct determinant_impl<Derived, 2>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 3>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return bruteforce_det3_helper(m,0,1,2)
- bruteforce_det3_helper(m,1,0,2)
+ bruteforce_det3_helper(m,2,0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 4>
{
static typename traits<Derived>::Scalar run(const Derived& m)
{
// trick by Martin Costabel to compute 4x4 det with only 30 muls
return bruteforce_det4_helper(m,0,1,2,3)
- bruteforce_det4_helper(m,0,2,1,3)
+ bruteforce_det4_helper(m,0,3,1,2)
+ bruteforce_det4_helper(m,1,2,0,3)
- bruteforce_det4_helper(m,1,3,0,2)
+ bruteforce_det4_helper(m,2,3,0,1);
}
};
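// (Descriptive note added.) Each bruteforce_det4_helper term above is a product
// of a 2x2 minor taken from columns 0-1 with a 2x2 minor taken from columns 2-3;
// the signed sum is the Laplace expansion of the 4x4 determinant along the
// first two columns.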
} // end namespace internal
/** \lu_module
*
* \returns the determinant of this matrix
*/
template<typename Derived>
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
eigen_assert(rows() == cols());
typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested;
return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
}
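// Illustrative worked example (added; not part of the original source):
// Matrix2d m; m << 1, 2,
// 3, 4;
// double d = m.determinant(); // 1*4 - 2*3 = -2, via determinant_impl<.,2> above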
} // end namespace Eigen
#endif // EIGEN_DETERMINANT_H
| 3,057 | 28.980392 | 97 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/FullPivLU.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_LU_H
#define EIGEN_LU_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
: traits<_MatrixType>
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
enum { Flags = 0 };
};
} // end namespace internal
/** \ingroup LU_Module
*
* \class FullPivLU
*
* \brief LU decomposition of a matrix with complete pivoting, and related features
*
* \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition
*
* This class represents an LU decomposition of any matrix, with complete pivoting: the matrix A is
* decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
* upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
* decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
* zeros are at the end.
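*
* In code, the factors can be used to rebuild the original matrix
* (illustrative sketch for a square matrix \c A):
* \code
* MatrixXd A = MatrixXd::Random(5,5);
* FullPivLU<MatrixXd> lu(A);
* MatrixXd L = MatrixXd::Identity(5,5);
* L.triangularView<StrictlyLower>() = lu.matrixLU();
* MatrixXd U = lu.matrixLU().triangularView<Upper>();
* MatrixXd Arec = lu.permutationP().inverse() * L * U * lu.permutationQ().inverse();
* // Arec reproduces A up to rounding
* \endcode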
*
* This decomposition provides the generic approach to solving systems of linear equations, computing
* the rank, invertibility, inverse, kernel, and determinant.
*
* This LU decomposition is very stable and well tested with large matrices. However there are use cases where the SVD
* decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix,
* working with the SVD allows selecting the smallest singular values of the matrix, something that
* the LU decomposition doesn't see.
*
* The data of the LU decomposition can be directly accessed through the methods matrixLU(),
* permutationP(), permutationQ().
*
* As an example, here is how the original matrix can be retrieved:
* \include class_FullPivLU.cpp
* Output: \verbinclude class_FullPivLU.out
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
*/
template<typename _MatrixType> class FullPivLU
: public SolverBase<FullPivLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef SolverBase<FullPivLU> Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU)
// FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename internal::plain_row_type<MatrixType, StorageIndex>::type IntRowVectorType;
typedef typename internal::plain_col_type<MatrixType, StorageIndex>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
typedef typename MatrixType::PlainObject PlainObject;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LU::compute(const MatrixType&).
*/
FullPivLU();
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa FullPivLU()
*/
FullPivLU(Index rows, Index cols);
/** Constructor.
*
* \param matrix the matrix of which to compute the LU decomposition.
* It is required to be nonzero.
*/
template<typename InputType>
explicit FullPivLU(const EigenBase<InputType>& matrix);
/** \brief Constructs a LU factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is an Eigen::Ref.
*
* \sa FullPivLU(const EigenBase&)
*/
template<typename InputType>
explicit FullPivLU(EigenBase<InputType>& matrix);
/** Computes the LU decomposition of the given matrix.
*
* \param matrix the matrix of which to compute the LU decomposition.
* It is required to be nonzero.
*
* \returns a reference to *this
*/
template<typename InputType>
FullPivLU& compute(const EigenBase<InputType>& matrix) {
m_lu = matrix.derived();
computeInPlace();
return *this;
}
/** \returns the LU decomposition matrix: the upper-triangular part is U, the
* unit-lower-triangular part is L (at least for square matrices; in the non-square
* case, special care is needed, see the documentation of class FullPivLU).
*
* \sa matrixL(), matrixU()
*/
inline const MatrixType& matrixLU() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_lu;
}
/** \returns the number of nonzero pivots in the LU decomposition.
* Here nonzero is meant in the exact sense, not in a fuzzy sense.
* So that notion isn't really intrinsically interesting, but it is
* still useful when implementing algorithms.
*
* \sa rank()
*/
inline Index nonzeroPivots() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots;
}
/** \returns the absolute value of the biggest pivot, i.e. the biggest
* diagonal coefficient of U.
*/
RealScalar maxPivot() const { return m_maxpivot; }
/** \returns the permutation matrix P
*
* \sa permutationQ()
*/
EIGEN_DEVICE_FUNC inline const PermutationPType& permutationP() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_p;
}
/** \returns the permutation matrix Q
*
* \sa permutationP()
*/
inline const PermutationQType& permutationQ() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_q;
}
/** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
* will form a basis of the kernel.
*
* \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*
* Example: \include FullPivLU_kernel.cpp
* Output: \verbinclude FullPivLU_kernel.out
*
* \sa image()
*/
inline const internal::kernel_retval<FullPivLU> kernel() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return internal::kernel_retval<FullPivLU>(*this);
}
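  // A minimal sketch of extracting a kernel basis, assuming a rank-deficient
  // dynamic double matrix; the names A and ker are illustrative only.
  //   Eigen::MatrixXd A(3,3);
  //   A << 1, 2, 3,
  //        2, 4, 6,   // second row is twice the first, so rank(A) == 2
  //        0, 0, 1;
  //   Eigen::MatrixXd ker = A.fullPivLu().kernel();
  //   // each column c of ker satisfies A * c == 0 (up to rounding)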
/** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
* will form a basis of the image (column-space).
*
* \param originalMatrix the original matrix, of which *this is the LU decomposition.
* The reason why it is needed to pass it here, is that this allows
* a large optimization, as otherwise this method would need to reconstruct it
* from the LU decomposition.
*
* \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*
* Example: \include FullPivLU_image.cpp
* Output: \verbinclude FullPivLU_image.out
*
* \sa kernel()
*/
inline const internal::image_retval<FullPivLU>
image(const MatrixType& originalMatrix) const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return internal::image_retval<FullPivLU>(*this, originalMatrix);
}
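  // A companion sketch for image(), assuming the decomposed matrix A is still
  // available (it must be passed back in, as explained above):
  //   Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  //   Eigen::MatrixXd im = lu.image(A);  // columns form a basis of the column-space
  //   // im.cols() == lu.rank() whenever the rank is nonzero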
/** \return a solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
* \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
* the only requirement in order for the equation to make sense is that
* b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
*
* \returns a solution.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution
* \note_about_using_kernel_to_study_multiple_solutions
*
* Example: \include FullPivLU_solve.cpp
* Output: \verbinclude FullPivLU_solve.out
*
* \sa TriangularView::solve(), kernel(), inverse()
*/
// FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<FullPivLU, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return Solve<FullPivLU, Rhs>(*this, b.derived());
}
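  // A minimal solve sketch, assuming double dynamic-size operands with
  // b.rows() == A.rows(); the names A, b, x are illustrative only.
  //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
  //   Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  //   Eigen::VectorXd x = A.fullPivLu().solve(b);
  //   // if the system is solvable, (A*x - b) is small; see the notes above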
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
*/
inline RealScalar rcond() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return internal::rcond_estimate_helper(m_l1_norm, *this);
}
/** \returns the determinant of the matrix of which
* *this is the LU decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the LU decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
* optimized paths.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
*
* \sa MatrixBase::determinant()
*/
typename internal::traits<MatrixType>::Scalar determinant() const;
  /** Allows one to prescribe a threshold to be used by certain methods, such as rank(),
    * which need to determine when pivots are to be considered nonzero. This is not used for the
* LU decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold(). By default, this
* uses a formula to automatically determine a reasonable threshold.
* Once you have called the present method setThreshold(const RealScalar&),
* your value is used instead.
*
* \param threshold The new value to use as the threshold.
*
    * A pivot will be considered nonzero if its absolute value is strictly greater than
    *  \f$ threshold \times \vert maxpivot \vert \f$
* where maxpivot is the biggest pivot.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
*/
FullPivLU& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}
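  // A short sketch of the threshold mechanism, assuming an already declared
  // matrix A; the value 1e-5 is purely illustrative.
  //   Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  //   lu.setThreshold(1e-5);           // pivots with |pivot| <= 1e-5 * maxPivot() count as zero
  //   Eigen::Index r = lu.rank();      // rank under the prescribed threshold
  //   lu.setThreshold(Eigen::Default); // restore the automatic formula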
  /** Allows one to return to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code lu.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
FullPivLU& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
}
/** \returns the rank of the matrix of which *this is the LU decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "LU is not initialized.");
RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
return result;
}
/** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index dimensionOfKernel() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return cols() - rank();
}
/** \returns true if the matrix of which *this is the LU decomposition represents an injective
* linear map, i.e. has trivial kernel; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInjective() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return rank() == cols();
}
/** \returns true if the matrix of which *this is the LU decomposition represents a surjective
* linear map; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isSurjective() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return rank() == rows();
}
/** \returns true if the matrix of which *this is the LU decomposition is invertible.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInvertible() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return isInjective() && (m_lu.rows() == m_lu.cols());
}
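  // A sketch of the rank-revealing queries, assuming lu is an initialized
  // FullPivLU of some matrix:
  //   Eigen::Index r = lu.rank();
  //   Eigen::Index k = lu.dimensionOfKernel(); // cols() - rank()
  //   bool inj = lu.isInjective();             // rank() == cols()
  //   bool srj = lu.isSurjective();            // rank() == rows()
  //   bool inv = lu.isInvertible();            // injective and square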
/** \returns the inverse of the matrix of which *this is the LU decomposition.
*
* \note If this matrix is not invertible, the returned matrix has undefined coefficients.
* Use isInvertible() to first determine whether this matrix is invertible.
*
* \sa MatrixBase::inverse()
*/
inline const Inverse<FullPivLU> inverse() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!");
return Inverse<FullPivLU>(*this);
}
MatrixType reconstructedMatrix() const;
EIGEN_DEVICE_FUNC inline Index rows() const { return m_lu.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
MatrixType m_lu;
PermutationPType m_p;
PermutationQType m_q;
IntColVectorType m_rowsTranspositions;
IntRowVectorType m_colsTranspositions;
Index m_nonzero_pivots;
RealScalar m_l1_norm;
RealScalar m_maxpivot, m_prescribedThreshold;
signed char m_det_pq;
bool m_isInitialized, m_usePrescribedThreshold;
};
template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU()
: m_isInitialized(false), m_usePrescribedThreshold(false)
{
}
template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
: m_lu(rows, cols),
m_p(rows),
m_q(cols),
m_rowsTranspositions(rows),
m_colsTranspositions(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
}
template<typename MatrixType>
template<typename InputType>
FullPivLU<MatrixType>::FullPivLU(const EigenBase<InputType>& matrix)
: m_lu(matrix.rows(), matrix.cols()),
m_p(matrix.rows()),
m_q(matrix.cols()),
m_rowsTranspositions(matrix.rows()),
m_colsTranspositions(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
compute(matrix.derived());
}
template<typename MatrixType>
template<typename InputType>
FullPivLU<MatrixType>::FullPivLU(EigenBase<InputType>& matrix)
: m_lu(matrix.derived()),
m_p(matrix.rows()),
m_q(matrix.cols()),
m_rowsTranspositions(matrix.rows()),
m_colsTranspositions(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
computeInPlace();
}
template<typename MatrixType>
void FullPivLU<MatrixType>::computeInPlace()
{
check_template_parameters();
// the permutations are stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<=NumTraits<int>::highest() && m_lu.cols()<=NumTraits<int>::highest());
m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
const Index size = m_lu.diagonalSize();
const Index rows = m_lu.rows();
const Index cols = m_lu.cols();
// will store the transpositions, before we accumulate them at the end.
// can't accumulate on-the-fly because that will be done in reverse order for the rows.
m_rowsTranspositions.resize(m_lu.rows());
m_colsTranspositions.resize(m_lu.cols());
Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
for(Index k = 0; k < size; ++k)
{
// First, we need to find the pivot.
// biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
Index row_of_biggest_in_corner, col_of_biggest_in_corner;
typedef internal::scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
Score biggest_in_corner;
biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
.unaryExpr(Scoring())
.maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
col_of_biggest_in_corner += k; // need to add k to them.
if(biggest_in_corner==Score(0))
{
// before exiting, make sure to initialize the still uninitialized transpositions
// in a sane state without destroying what we already have.
m_nonzero_pivots = k;
for(Index i = k; i < size; ++i)
{
m_rowsTranspositions.coeffRef(i) = i;
m_colsTranspositions.coeffRef(i) = i;
}
break;
}
RealScalar abs_pivot = internal::abs_knowing_score<Scalar>()(m_lu(row_of_biggest_in_corner, col_of_biggest_in_corner), biggest_in_corner);
if(abs_pivot > m_maxpivot) m_maxpivot = abs_pivot;
// Now that we've found the pivot, we need to apply the row/col swaps to
// bring it to the location (k,k).
m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
if(k != row_of_biggest_in_corner) {
m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
++number_of_transpositions;
}
if(k != col_of_biggest_in_corner) {
m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
++number_of_transpositions;
}
// Now that the pivot is at the right location, we update the remaining
// bottom-right corner by Gaussian elimination.
if(k<rows-1)
m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
if(k<size-1)
m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
}
// the main loop is over, we still have to accumulate the transpositions to find the
// permutations P and Q
m_p.setIdentity(rows);
for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_q.setIdentity(cols);
for(Index k = 0; k < size; ++k)
m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
m_isInitialized = true;
}
template<typename MatrixType>
typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
}
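/* A worked note on the determinant above: with P A Q = L U and L
 * unit-lower-triangular, det(A) = det(P^{-1}) det(L) det(U) det(Q^{-1})
 * = (+-1) * 1 * prod(diag(U)) * (+-1), and the combined sign is exactly
 * m_det_pq, the parity of the row and column transpositions counted during
 * the factorization. */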
/** \returns the matrix represented by the decomposition,
* i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
* This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...
res = m_lu.leftCols(smalldim)
.template triangularView<UnitLower>().toDenseMatrix()
* m_lu.topRows(smalldim)
.template triangularView<Upper>().toDenseMatrix();
// P^{-1}(LU)
res = m_p.inverse() * res;
// (P^{-1}LU)Q^{-1}
res = res * m_q.inverse();
return res;
}
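// A minimal verification sketch, assuming an initialized decomposition of a
// dynamic double matrix A; the residual should be at the level of rounding.
//   Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
//   double err = (lu.reconstructedMatrix() - A).norm();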
/********* Implementation of kernel() **************************************************/
namespace internal {
template<typename _MatrixType>
struct kernel_retval<FullPivLU<_MatrixType> >
: kernel_retval_base<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
template<typename Dest> void evalTo(Dest& dst) const
{
using std::abs;
const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
if(dimker == 0)
{
// The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
// avoid crashing/asserting as that depends on floating point calculations. Let's
// just return a single column vector filled with zeros.
dst.setZero();
return;
}
/* Let us use the following lemma:
*
* Lemma: If the matrix A has the LU decomposition PAQ = LU,
* then Ker A = Q(Ker U).
*
* Proof: trivial: just keep in mind that P, Q, L are invertible.
*/
/* Thus, all we need to do is to compute Ker U, and then apply Q.
*
       * U is upper triangular, with its diagonal coefficients sorted so that any zeros appear at the end.
       * Thus, the diagonal of U ends with exactly
       * dimKer zeros. Let us use that to construct dimKer linearly
* independent vectors in Ker U.
*/
Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
Index p = 0;
for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
eigen_internal_assert(p == rank());
      // we construct a temporary trapezoid matrix m, by taking the U matrix and
      // permuting the rows and columns to bring the nonnegligible pivots to the top of
// the main diagonal. We need that to be able to apply our triangular solvers.
// FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
m(dec().matrixLU().block(0, 0, rank(), cols));
for(Index i = 0; i < rank(); ++i)
{
if(i) m.row(i).head(i).setZero();
m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
}
      // zero out the strictly lower part of the top-left rank() x rank() block
      m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
for(Index i = 0; i < rank(); ++i)
m.col(i).swap(m.col(pivots.coeff(i)));
// ok, we have our trapezoid matrix, we can apply the triangular solver.
// notice that the math behind this suggests that we should apply this to the
// negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
m.topLeftCorner(rank(), rank())
.template triangularView<Upper>().solveInPlace(
m.topRightCorner(rank(), dimker)
);
// now we must undo the column permutation that we had applied!
for(Index i = rank()-1; i >= 0; --i)
m.col(i).swap(m.col(pivots.coeff(i)));
// see the negative sign in the next line, that's what we were talking about above.
for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
}
};
/***** Implementation of image() *****************************************************/
template<typename _MatrixType>
struct image_retval<FullPivLU<_MatrixType> >
: image_retval_base<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)
enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
template<typename Dest> void evalTo(Dest& dst) const
{
using std::abs;
if(rank() == 0)
{
// The Image is just {0}, so it doesn't have a basis properly speaking, but let's
// avoid crashing/asserting as that depends on floating point calculations. Let's
// just return a single column vector filled with zeros.
dst.setZero();
return;
}
Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
Index p = 0;
for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
eigen_internal_assert(p == rank());
for(Index i = 0; i < rank(); ++i)
dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
}
};
/***** Implementation of solve() *****************************************************/
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType>
template<typename RhsType, typename DstType>
void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
/* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
* So we proceed as follows:
* Step 1: compute c = P * rhs.
* Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
* Step 3: replace c by the solution x to Ux = c. May or may not exist.
* Step 4: result = Q * c;
*/
const Index rows = this->rows(),
cols = this->cols(),
nonzero_pivots = this->rank();
eigen_assert(rhs.rows() == rows);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs.rows(), rhs.cols());
// Step 1
c = permutationP() * rhs;
// Step 2
m_lu.topLeftCorner(smalldim,smalldim)
.template triangularView<UnitLower>()
.solveInPlace(c.topRows(smalldim));
if(rows>cols)
c.bottomRows(rows-cols) -= m_lu.bottomRows(rows-cols) * c.topRows(cols);
// Step 3
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 4
for(Index i = 0; i < nonzero_pivots; ++i)
dst.row(permutationQ().indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < m_lu.cols(); ++i)
dst.row(permutationQ().indices().coeff(i)).setZero();
}
template<typename _MatrixType>
template<bool Conjugate, typename RhsType, typename DstType>
void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
{
/* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1},
* and since permutations are real and unitary, we can write this
* as A^T = Q U^T L^T P,
* So we proceed as follows:
* Step 1: compute c = Q^T rhs.
* Step 2: replace c by the solution x to U^T x = c. May or may not exist.
* Step 3: replace c by the solution x to L^T x = c.
* Step 4: result = P^T c.
* If Conjugate is true, replace "^T" by "^*" above.
*/
const Index rows = this->rows(), cols = this->cols(),
nonzero_pivots = this->rank();
eigen_assert(rhs.rows() == cols);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs.rows(), rhs.cols());
// Step 1
c = permutationQ().inverse() * rhs;
if (Conjugate) {
// Step 2
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.adjoint()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 3
m_lu.topLeftCorner(smalldim, smalldim)
.template triangularView<UnitLower>()
.adjoint()
.solveInPlace(c.topRows(smalldim));
} else {
// Step 2
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.transpose()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 3
m_lu.topLeftCorner(smalldim, smalldim)
.template triangularView<UnitLower>()
.transpose()
.solveInPlace(c.topRows(smalldim));
}
// Step 4
PermutationPType invp = permutationP().inverse().eval();
for(Index i = 0; i < smalldim; ++i)
dst.row(invp.indices().coeff(i)) = c.row(i);
for(Index i = smalldim; i < rows; ++i)
dst.row(invp.indices().coeff(i)).setZero();
}
#endif
namespace internal {
/***** Implementation of inverse() *****************************************************/
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivLU<MatrixType>::Scalar>, Dense2Dense>
{
typedef FullPivLU<MatrixType> LuType;
typedef Inverse<LuType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
} // end namespace internal
/******* MatrixBase methods *****************************************************************/
/** \lu_module
*
* \return the full-pivoting LU decomposition of \c *this.
*
* \sa class FullPivLU
*/
template<typename Derived>
inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivLu() const
{
return FullPivLU<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_LU_H
| 32,803 | 35.775785 | 164 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/InverseImpl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INVERSE_IMPL_H
#define EIGEN_INVERSE_IMPL_H
namespace Eigen {
namespace internal {
/**********************************
*** General case implementation ***
**********************************/
template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
struct compute_inverse
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
result = matrix.partialPivLu().inverse();
}
};
template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
struct compute_inverse_and_det_with_check { /* nothing! general case not supported. */ };
/****************************
*** Size 1 implementation ***
****************************/
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 1>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename MatrixType::Scalar Scalar;
internal::evaluator<MatrixType> matrixEval(matrix);
result.coeffRef(0,0) = Scalar(1) / matrixEval.coeff(0,0);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& result,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
determinant = matrix.coeff(0,0);
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
}
};
/****************************
*** Size 2 implementation ***
****************************/
template<typename MatrixType, typename ResultType>
EIGEN_DEVICE_FUNC
inline void compute_inverse_size2_helper(
const MatrixType& matrix, const typename ResultType::Scalar& invdet,
ResultType& result)
{
result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
}
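/* The helper above spells out the closed-form 2x2 inverse: for
 * A = [a b; c d] one has A^{-1} = (1/(a*d - b*c)) * [d -b; -c a],
 * with invdet = 1/(a*d - b*c) supplied by the caller. */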
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 2>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename ResultType::Scalar Scalar;
const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant();
compute_inverse_size2_helper(matrix, invdet, result);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
typedef typename ResultType::Scalar Scalar;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
if(!invertible) return;
const Scalar invdet = Scalar(1) / determinant;
compute_inverse_size2_helper(matrix, invdet, inverse);
}
};
/****************************
*** Size 3 implementation ***
****************************/
template<typename MatrixType, int i, int j>
EIGEN_DEVICE_FUNC
inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)
{
enum {
i1 = (i+1) % 3,
i2 = (i+2) % 3,
j1 = (j+1) % 3,
j2 = (j+2) % 3
};
return m.coeff(i1, j1) * m.coeff(i2, j2)
- m.coeff(i1, j2) * m.coeff(i2, j1);
}
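/* Note on the indexing above: (i+1)%3 and (i+2)%3 enumerate the two other
 * rows in cyclic order (and likewise for the columns), so the returned 2x2
 * determinant already carries the cofactor sign; the cyclic ordering absorbs
 * the usual (-1)^(i+j) factor. */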
template<typename MatrixType, typename ResultType>
EIGEN_DEVICE_FUNC
inline void compute_inverse_size3_helper(
const MatrixType& matrix,
const typename ResultType::Scalar& invdet,
const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,
ResultType& result)
{
result.row(0) = cofactors_col0 * invdet;
result.coeffRef(1,0) = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
result.coeffRef(1,1) = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
result.coeffRef(1,2) = cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
result.coeffRef(2,0) = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
result.coeffRef(2,1) = cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
result.coeffRef(2,2) = cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
}
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 3>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename ResultType::Scalar Scalar;
Matrix<typename MatrixType::Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
const Scalar invdet = Scalar(1) / det;
compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
typedef typename ResultType::Scalar Scalar;
Matrix<Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
invertible = abs(determinant) > absDeterminantThreshold;
if(!invertible) return;
const Scalar invdet = Scalar(1) / determinant;
compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);
}
};
/****************************
*** Size 4 implementation ***
****************************/
template<typename Derived>
EIGEN_DEVICE_FUNC
inline const typename Derived::Scalar general_det3_helper
(const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)
{
return matrix.coeff(i1,j1)
* (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
}
template<typename MatrixType, int i, int j>
EIGEN_DEVICE_FUNC
inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)
{
enum {
i1 = (i+1) % 4,
i2 = (i+2) % 4,
i3 = (i+3) % 4,
j1 = (j+1) % 4,
j2 = (j+2) % 4,
j3 = (j+3) % 4
};
return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
+ general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
+ general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
}
template<int Arch, typename Scalar, typename MatrixType, typename ResultType>
struct compute_inverse_size4
{
EIGEN_DEVICE_FUNC
static void run(const MatrixType& matrix, ResultType& result)
{
result.coeffRef(0,0) = cofactor_4x4<MatrixType,0,0>(matrix);
result.coeffRef(1,0) = -cofactor_4x4<MatrixType,0,1>(matrix);
result.coeffRef(2,0) = cofactor_4x4<MatrixType,0,2>(matrix);
result.coeffRef(3,0) = -cofactor_4x4<MatrixType,0,3>(matrix);
result.coeffRef(0,2) = cofactor_4x4<MatrixType,2,0>(matrix);
result.coeffRef(1,2) = -cofactor_4x4<MatrixType,2,1>(matrix);
result.coeffRef(2,2) = cofactor_4x4<MatrixType,2,2>(matrix);
result.coeffRef(3,2) = -cofactor_4x4<MatrixType,2,3>(matrix);
result.coeffRef(0,1) = -cofactor_4x4<MatrixType,1,0>(matrix);
result.coeffRef(1,1) = cofactor_4x4<MatrixType,1,1>(matrix);
result.coeffRef(2,1) = -cofactor_4x4<MatrixType,1,2>(matrix);
result.coeffRef(3,1) = cofactor_4x4<MatrixType,1,3>(matrix);
result.coeffRef(0,3) = -cofactor_4x4<MatrixType,3,0>(matrix);
result.coeffRef(1,3) = cofactor_4x4<MatrixType,3,1>(matrix);
result.coeffRef(2,3) = -cofactor_4x4<MatrixType,3,2>(matrix);
result.coeffRef(3,3) = cofactor_4x4<MatrixType,3,3>(matrix);
result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum();
}
};
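/* Note on the 4x4 path above: result is filled with the adjugate (transposed
 * cofactor matrix), so matrix * result == det * I before the final division.
 * The determinant itself is recovered as the Laplace expansion along column 0,
 * i.e. the dot product of matrix.col(0) with the first row of the adjugate. */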
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 4>
: compute_inverse_size4<Architecture::Target, typename MatrixType::Scalar,
MatrixType, ResultType>
{
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
}
};
/*************************
*** MatrixBase methods ***
*************************/
} // end namespace internal
namespace internal {
// Specialization for "dense = dense_xpr.inverse()"
template<typename DstXprType, typename XprType>
struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
{
typedef Inverse<XprType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);
EIGEN_ONLY_USED_FOR_DEBUG(Size);
eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))
&& "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type ActualXprType;
    typedef typename internal::remove_all<ActualXprType>::type ActualXprTypeCleaned;
    ActualXprType actual_xpr(src.nestedExpression());
    compute_inverse<ActualXprTypeCleaned, DstXprType>::run(actual_xpr, dst);
}
};
} // end namespace internal
/** \lu_module
*
* \returns the matrix inverse of this matrix.
*
* For small fixed sizes up to 4x4, this method uses cofactors.
* In the general case, this method uses class PartialPivLU.
*
* \note This matrix must be invertible, otherwise the result is undefined. If you need an
* invertibility check, do the following:
* \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck().
* \li for the general case, use class FullPivLU.
*
* Example: \include MatrixBase_inverse.cpp
* Output: \verbinclude MatrixBase_inverse.out
*
* \sa computeInverseAndDetWithCheck()
*/
template<typename Derived>
inline const Inverse<Derived> MatrixBase<Derived>::inverse() const
{
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
eigen_assert(rows() == cols());
return Inverse<Derived>(derived());
}
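/* A minimal usage sketch, assuming an invertible fixed-size matrix; for sizes
 * up to 4x4 this dispatches to the cofactor paths above, otherwise to
 * PartialPivLU. The matrix M below is illustrative (upper triangular, det 2).
 *
 *   Eigen::Matrix3d M;
 *   M << 1, 2, 0,
 *        0, 1, 0,
 *        0, 0, 2;
 *   Eigen::Matrix3d Minv = M.inverse();
 */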
/** \lu_module
*
* Computation of matrix inverse and determinant, with invertibility check.
*
* This is only for fixed-size square matrices of size up to 4x4.
*
* \param inverse Reference to the matrix in which to store the inverse.
* \param determinant Reference to the variable in which to store the determinant.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
* The matrix will be declared invertible if the absolute value of its
* determinant is greater than this threshold.
*
* Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp
* Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out
*
* \sa inverse(), computeInverseWithCheck()
*/
template<typename Derived>
template<typename ResultType>
inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible,
const RealScalar& absDeterminantThreshold
) const
{
  // I'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
// for 2x2, it's worth giving a chance to avoid evaluating.
// for larger sizes, evaluating has negligible cost and limits code size.
typedef typename internal::conditional<
RowsAtCompileTime == 2,
typename internal::remove_all<typename internal::nested_eval<Derived, 2>::type>::type,
PlainObject
>::type MatrixType;
internal::compute_inverse_and_det_with_check<MatrixType, ResultType>::run
(derived(), absDeterminantThreshold, inverse, determinant, invertible);
}
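/* A checked-inversion sketch, assuming a fixed-size 3x3 double matrix and the
 * default threshold; names are illustrative.
 *
 *   Eigen::Matrix3d M = Eigen::Matrix3d::Random(), Minv;
 *   double det;
 *   bool ok;
 *   M.computeInverseAndDetWithCheck(Minv, det, ok);
 *   // use Minv and det only when ok is true
 */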
/** \lu_module
*
* Computation of matrix inverse, with invertibility check.
*
* This is only for fixed-size square matrices of size up to 4x4.
*
* \param inverse Reference to the matrix in which to store the inverse.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
* The matrix will be declared invertible if the absolute value of its
* determinant is greater than this threshold.
*
* Example: \include MatrixBase_computeInverseWithCheck.cpp
* Output: \verbinclude MatrixBase_computeInverseWithCheck.out
*
* \sa inverse(), computeInverseAndDetWithCheck()
*/
template<typename Derived>
template<typename ResultType>
inline void MatrixBase<Derived>::computeInverseWithCheck(
ResultType& inverse,
bool& invertible,
const RealScalar& absDeterminantThreshold
) const
{
RealScalar determinant;
  // I'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
}
} // end namespace Eigen
#endif // EIGEN_INVERSE_IMPL_H
| 15,068 | 35.223558 | 140 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/PartialPivLU.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <[email protected]>
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARTIALLU_H
#define EIGEN_PARTIALLU_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<PartialPivLU<_MatrixType> >
: traits<_MatrixType>
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
typedef traits<_MatrixType> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
CoeffReadCost = Dynamic
};
};
template<typename T,typename Derived>
struct enable_if_ref;
// {
// typedef Derived type;
// };
template<typename T,typename Derived>
struct enable_if_ref<Ref<T>,Derived> {
typedef Derived type;
};
} // end namespace internal
/** \ingroup LU_Module
*
* \class PartialPivLU
*
* \brief LU decomposition of a matrix with partial pivoting, and related features
*
* \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition
*
* This class represents a LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A
* is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P
* is a permutation matrix.
*
* Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible
* matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class
* does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the
* matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices.
*
* The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided
* by class FullPivLU.
*
* This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class,
* such as rank computation. If you need these features, use class FullPivLU.
*
* This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses
* in the general case.
* On the other hand, it is \b not suitable to determine whether a given matrix is invertible.
*
* The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
 * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverseAndDetWithCheck(), class FullPivLU
*/
template<typename _MatrixType> class PartialPivLU
: public SolverBase<PartialPivLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef SolverBase<PartialPivLU> Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU)
// FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef typename MatrixType::PlainObject PlainObject;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via PartialPivLU::compute(const MatrixType&).
*/
PartialPivLU();
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa PartialPivLU()
*/
explicit PartialPivLU(Index size);
/** Constructor.
*
* \param matrix the matrix of which to compute the LU decomposition.
*
* \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
* If you need to deal with non-full rank, use class FullPivLU instead.
*/
template<typename InputType>
explicit PartialPivLU(const EigenBase<InputType>& matrix);
/** Constructor for \link InplaceDecomposition inplace decomposition \endlink
*
* \param matrix the matrix of which to compute the LU decomposition.
*
* \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
* If you need to deal with non-full rank, use class FullPivLU instead.
*/
template<typename InputType>
explicit PartialPivLU(EigenBase<InputType>& matrix);
template<typename InputType>
PartialPivLU& compute(const EigenBase<InputType>& matrix) {
m_lu = matrix.derived();
compute();
return *this;
}
/** \returns the LU decomposition matrix: the upper-triangular part is U, the
* unit-lower-triangular part is L (at least for square matrices; in the non-square
* case, special care is needed, see the documentation of class FullPivLU).
*
* \sa matrixL(), matrixU()
*/
inline const MatrixType& matrixLU() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return m_lu;
}
/** \returns the permutation matrix P.
*/
inline const PermutationType& permutationP() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return m_p;
}
/** This method returns the solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
* \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
* the only requirement in order for the equation to make sense is that
* b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
*
* \returns the solution.
*
* Example: \include PartialPivLU_solve.cpp
* Output: \verbinclude PartialPivLU_solve.out
*
* Since this PartialPivLU class assumes anyway that the matrix A is invertible, the solution
* theoretically exists and is unique regardless of b.
*
    * \sa TriangularView::solve(), inverse(), computeInverseAndDetWithCheck()
*/
// FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<PartialPivLU, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Solve<PartialPivLU, Rhs>(*this, b.derived());
}
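    // A minimal solve sketch, assuming a square, known-to-be-invertible double
    // matrix (PartialPivLU cannot check this); names A, b, x are illustrative.
    //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
    //   Eigen::VectorXd b = Eigen::VectorXd::Random(4);
    //   Eigen::VectorXd x = A.partialPivLu().solve(b);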
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
*/
inline RealScalar rcond() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return internal::rcond_estimate_helper(m_l1_norm, *this);
}
/** \returns the inverse of the matrix of which *this is the LU decomposition.
*
* \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
* invertibility, use class FullPivLU instead.
*
* \sa MatrixBase::inverse(), LU::inverse()
*/
inline const Inverse<PartialPivLU> inverse() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Inverse<PartialPivLU>(*this);
}
/** \returns the determinant of the matrix of which
* *this is the LU decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the LU decomposition has already been computed.
*
* \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
* optimized paths.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
*
* \sa MatrixBase::determinant()
*/
Scalar determinant() const;
MatrixType reconstructedMatrix() const;
inline Index rows() const { return m_lu.rows(); }
inline Index cols() const { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const {
/* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
* So we proceed as follows:
* Step 1: compute c = Pb.
* Step 2: replace c by the solution x to Lx = c.
* Step 3: replace c by the solution x to Ux = c.
*/
eigen_assert(rhs.rows() == m_lu.rows());
// Step 1
dst = permutationP() * rhs;
// Step 2
m_lu.template triangularView<UnitLower>().solveInPlace(dst);
// Step 3
m_lu.template triangularView<Upper>().solveInPlace(dst);
}
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const {
    /* The decomposition PA = LU can be rewritten as A^T = U^T L^T P.
     * So we proceed as follows:
     * Step 1: replace c by the solution x to U^T x = rhs.
     * Step 2: replace c by the solution x to L^T x = c.
     * Step 3: result = P^T c.
     * If Conjugate is true, replace "^T" by "^*" above.
     */
eigen_assert(rhs.rows() == m_lu.cols());
if (Conjugate) {
// Step 1
dst = m_lu.template triangularView<Upper>().adjoint().solve(rhs);
// Step 2
m_lu.template triangularView<UnitLower>().adjoint().solveInPlace(dst);
} else {
// Step 1
dst = m_lu.template triangularView<Upper>().transpose().solve(rhs);
// Step 2
m_lu.template triangularView<UnitLower>().transpose().solveInPlace(dst);
}
// Step 3
dst = permutationP().transpose() * dst;
}
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void compute();
MatrixType m_lu;
PermutationType m_p;
TranspositionType m_rowsTranspositions;
RealScalar m_l1_norm;
signed char m_det_p;
bool m_isInitialized;
};
template<typename MatrixType>
PartialPivLU<MatrixType>::PartialPivLU()
: m_lu(),
m_p(),
m_rowsTranspositions(),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
}
template<typename MatrixType>
PartialPivLU<MatrixType>::PartialPivLU(Index size)
: m_lu(size, size),
m_p(size),
m_rowsTranspositions(size),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
}
template<typename MatrixType>
template<typename InputType>
PartialPivLU<MatrixType>::PartialPivLU(const EigenBase<InputType>& matrix)
: m_lu(matrix.rows(),matrix.cols()),
m_p(matrix.rows()),
m_rowsTranspositions(matrix.rows()),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
compute(matrix.derived());
}
template<typename MatrixType>
template<typename InputType>
PartialPivLU<MatrixType>::PartialPivLU(EigenBase<InputType>& matrix)
: m_lu(matrix.derived()),
m_p(matrix.rows()),
m_rowsTranspositions(matrix.rows()),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
compute();
}
namespace internal {
/** \internal Unblocked kernel and blocked driver for the in-place partial-pivoting LU decomposition. */
template<typename Scalar, int StorageOrder, typename PivIndex>
struct partial_lu_impl
{
// FIXME add a stride to Map, so that the following mapping becomes easier,
// another option would be to create an expression being able to automatically
  // wrap any Map, Matrix, and Block expressions as a unique type, but since that's exactly
  // a Map + stride, why not add a stride to Map, and convenient ctors from a Matrix,
// and Block.
typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
typedef typename MatrixType::RealScalar RealScalar;
/** \internal performs the LU decomposition in-place of the matrix \a lu
* using an unblocked algorithm.
*
* In addition, this function returns the row transpositions in the
* vector \a row_transpositions which must have a size equal to the number
* of columns of the matrix \a lu, and an integer \a nb_transpositions
* which returns the actual number of transpositions.
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*/
static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
{
typedef scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
Index first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)
{
Index rrows = rows-k-1;
Index rcols = cols-k-1;
Index row_of_biggest_in_col;
Score biggest_in_corner
= lu.col(k).tail(rows-k).unaryExpr(Scoring()).maxCoeff(&row_of_biggest_in_col);
row_of_biggest_in_col += k;
row_transpositions[k] = PivIndex(row_of_biggest_in_col);
if(biggest_in_corner != Score(0))
{
if(k != row_of_biggest_in_col)
{
lu.row(k).swap(lu.row(row_of_biggest_in_col));
++nb_transpositions;
}
        // FIXME shall we introduce a safe quotient expression in case 1/lu.coeff(k,k)
        // overflows but not the actual quotient?
lu.col(k).tail(rrows) /= lu.coeff(k,k);
}
else if(first_zero_pivot==-1)
{
        // the pivot is exactly zero: we record the index of the first pivot which is exactly 0,
        // and continue the factorization such that we still have A = PLU
first_zero_pivot = k;
}
if(k<rows-1)
lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
}
return first_zero_pivot;
}
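  /* A worked note on unblocked_lu above: at step k the entry of largest
   * absolute value in column k (rows k..rows-1) is swapped onto the diagonal,
   * the sub-diagonal part of column k is divided by the pivot (storing the
   * multipliers of L in-place), and the trailing block receives a rank-1
   * Schur-complement update. */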
/** \internal performs the LU decomposition in-place of the matrix represented
* by the variables \a rows, \a cols, \a lu_data, and \a lu_stride using a
* recursive, blocked algorithm.
*
* In addition, this function returns the row transpositions in the
* vector \a row_transpositions which must have a size equal to the number
* of columns of the matrix \a lu, and an integer \a nb_transpositions
* which returns the actual number of transpositions.
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*
* \note This very low level interface using pointers, etc. is to:
    * 1 - reduce the number of instantiations to the strict minimum
    * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);
const Index size = (std::min)(rows,cols);
// if the matrix is too small, no blocking:
if(size<=16)
{
return unblocked_lu(lu, row_transpositions, nb_transpositions);
}
// automatically adjust the number of subdivisions to the size
// of the matrix so that there is enough sub blocks:
Index blockSize;
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
}
nb_transpositions = 0;
Index first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index trows = rows - k - bs; // trailing rows
Index tsize = size - k - bs; // trailing size
// partition the matrix:
// A00 | A01 | A02
// lu = A_0 | A_1 | A_2 = A10 | A11 | A12
// A20 | A21 | A22
BlockType A_0(lu,0,0,rows,k);
BlockType A_2(lu,0,k+bs,rows,tsize);
BlockType A11(lu,k,k,bs,bs);
BlockType A12(lu,k,k+bs,bs,tsize);
BlockType A21(lu,k+bs,k,trows,bs);
BlockType A22(lu,k+bs,k+bs,trows,tsize);
PivIndex nb_transpositions_in_panel;
// recursively call the blocked LU algorithm on [A11^T A21^T]^T
// with a very small blocking size:
Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
row_transpositions+k, nb_transpositions_in_panel, 16);
if(ret>=0 && first_zero_pivot==-1)
first_zero_pivot = k+ret;
nb_transpositions += nb_transpositions_in_panel;
// update permutations and apply them to A_0
for(Index i=k; i<k+bs; ++i)
{
Index piv = (row_transpositions[i] += internal::convert_index<PivIndex>(k));
A_0.row(i).swap(A_0.row(piv));
}
if(trows)
{
// apply permutations to A_2
for(Index i=k;i<k+bs; ++i)
A_2.row(i).swap(A_2.row(row_transpositions[i]));
// A12 = A11^-1 A12
A11.template triangularView<UnitLower>().solveInPlace(A12);
A22.noalias() -= A21 * A12;
}
}
return first_zero_pivot;
}
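  /* A worked instance of the block-size heuristic above, assuming size = 1000
   * and the default maxBlockSize = 256:
   *   blockSize = 1000/8 = 125  ->  (125/16)*16 = 112  ->  clamp to [8,256] = 112,
   * so the factorization proceeds in panels of 112 columns (the last panel
   * being smaller). */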
};
/** \internal performs the LU decomposition with partial pivoting in-place.
*/
template<typename MatrixType, typename TranspositionType>
void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)
{
eigen_assert(lu.cols() == row_transpositions.size());
eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
partial_lu_impl
<typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
} // end namespace internal
template<typename MatrixType>
void PartialPivLU<MatrixType>::compute()
{
check_template_parameters();
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<NumTraits<int>::highest());
m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
eigen_assert(m_lu.rows() == m_lu.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const Index size = m_lu.rows();
m_rowsTranspositions.resize(size);
typename TranspositionType::StorageIndex nb_transpositions;
internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
m_det_p = (nb_transpositions%2) ? -1 : 1;
m_p = m_rowsTranspositions;
m_isInitialized = true;
}
template<typename MatrixType>
typename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Scalar(m_det_p) * m_lu.diagonal().prod();
}
/** \returns the matrix represented by the decomposition,
* i.e., it returns the product: P^{-1} L U.
 * This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
// LU
MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()
* m_lu.template triangularView<Upper>();
// P^{-1}(LU)
res = m_p.inverse() * res;
return res;
}
/***** Implementation details *****************************************************/
namespace internal {
/***** Implementation of inverse() *****************************************************/
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename PartialPivLU<MatrixType>::Scalar>, Dense2Dense>
{
typedef PartialPivLU<MatrixType> LuType;
typedef Inverse<LuType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
} // end namespace internal
/******** MatrixBase methods *******/
/** \lu_module
*
* \return the partial-pivoting LU decomposition of \c *this.
*
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::partialPivLu() const
{
return PartialPivLU<PlainObject>(eval());
}
/** \lu_module
*
* Synonym of partialPivLu().
*
* \return the partial-pivoting LU decomposition of \c *this.
*
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::lu() const
{
return PartialPivLU<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_PARTIALLU_H
| 21,478 | 34.096405 | 170 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/PartialPivLU_LAPACKE.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* LU decomposition with partial pivoting based on LAPACKE_?getrf function.
********************************************************************************
*/
#ifndef EIGEN_PARTIALLU_LAPACK_H
#define EIGEN_PARTIALLU_LAPACK_H
namespace Eigen {
namespace internal {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_LU_PARTPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
template<int StorageOrder> \
struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
{ \
  /* \internal performs the LU decomposition in-place of the matrix pointed to by lu_data */ \
static lapack_int blocked_lu(Index rows, Index cols, EIGTYPE* lu_data, Index luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
{ \
EIGEN_UNUSED_VARIABLE(maxBlockSize);\
lapack_int matrix_order, first_zero_pivot; \
lapack_int m, n, lda, *ipiv, info; \
EIGTYPE* a; \
/* Set up parameters for ?getrf */ \
matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
lda = convert_index<lapack_int>(luStride); \
a = lu_data; \
ipiv = row_transpositions; \
m = convert_index<lapack_int>(rows); \
n = convert_index<lapack_int>(cols); \
nb_transpositions = 0; \
\
info = LAPACKE_##LAPACKE_PREFIX##getrf( matrix_order, m, n, (LAPACKE_TYPE*)a, lda, ipiv ); \
\
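    /* LAPACK's ?getrf returns 1-based pivot indices in ipiv; make them 0-based and count off-diagonal pivots to track the permutation parity */ \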
for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
\
eigen_assert(info >= 0); \
/* something should be done with nb_transpositions */ \
\
first_zero_pivot = info; \
return first_zero_pivot; \
} \
};
EIGEN_LAPACKE_LU_PARTPIV(double, double, d)
EIGEN_LAPACKE_LU_PARTPIV(float, float, s)
EIGEN_LAPACKE_LU_PARTPIV(dcomplex, lapack_complex_double, z)
EIGEN_LAPACKE_LU_PARTPIV(scomplex, lapack_complex_float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARTIALLU_LAPACK_H
| 3,555 | 41.333333 | 182 |
h
|
abess
|
abess-master/python/include/Eigen/src/LU/arch/Inverse_SSE.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel's library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
// Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided, that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.
#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H
namespace Eigen {
namespace internal {
template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
enum {
MatrixAlignment = traits<MatrixType>::Alignment,
ResultAlignment = traits<ResultType>::Alignment,
StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
};
typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
static void run(const MatrixType& mat, ResultType& result)
{
ActualMatrixType matrix(mat);
EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
// Load the full matrix into registers
__m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
__m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
__m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
__m128 _L4 = matrix.template packet<MatrixAlignment>(12);
    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each of the smaller matrices is
    // represented by a single register. Hence we get a better locality of the
    // calculations.
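    // A sketch of the blockwise identities the code below implements (X#
    // denotes the 2x2 adjugate, so X*X# = |X|*I): for M = [A B; C D],
    //   |M| = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C))
    // and, up to the final 1/|M| scaling and adjugate shuffles,
    //   iA = A*|D| - B*(D#*C),   iB = C*|B| - D*(B#*A),
    //   iC = B*|C| - A*(C#*D),   iD = D*|A| - C*(A#*B).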
__m128 A, B, C, D; // the four sub-matrices
if(!StorageOrdersMatch)
{
A = _mm_unpacklo_ps(_L1, _L2);
B = _mm_unpacklo_ps(_L3, _L4);
C = _mm_unpackhi_ps(_L1, _L2);
D = _mm_unpackhi_ps(_L3, _L4);
}
else
{
A = _mm_movelh_ps(_L1, _L2);
B = _mm_movehl_ps(_L2, _L1);
C = _mm_movelh_ps(_L3, _L4);
D = _mm_movehl_ps(_L4, _L3);
}
__m128 iA, iB, iC, iD, // partial inverse of the sub-matrices
DC, AB;
__m128 dA, dB, dC, dD; // determinant of the sub-matrices
__m128 det, d, d1, d2;
__m128 rd; // reciprocal of the determinant
// AB = A# * B
AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
// DC = D# * C
DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
// dA = |A|
dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
// dB = |B|
dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
// dC = |C|
dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
// dD = |D|
dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
// d = trace(AB*DC) = trace(A#*B*D#*C)
d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);
// iD = C*A#*B
iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
// iA = B*D#*C
iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));
// d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
d = _mm_add_ps(d, _mm_movehl_ps(d, d));
d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
d1 = _mm_mul_ss(dA,dD);
d2 = _mm_mul_ss(dB,dC);
// iD = D*|A| - C*A#*B
iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);
// iA = A*|D| - B*D#*C;
iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);
// det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
rd = _mm_div_ss(_mm_set_ss(1.0f), det);
// #ifdef ZERO_SINGULAR
// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
// #endif
// iB = D * (A#B)# = D*B#*A
iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
// iC = A * (D#C)# = A*C#*D
iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
rd = _mm_shuffle_ps(rd,rd,0);
rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));
// iB = C*|B| - D*B#*A
iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
// iC = B*|C| - A*C#*D;
iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);
// iX = iX / det
iA = _mm_mul_ps(rd,iA);
iB = _mm_mul_ps(rd,iB);
iC = _mm_mul_ps(rd,iC);
iD = _mm_mul_ps(rd,iD);
Index res_stride = result.outerStride();
float* res = result.data();
pstoret<float, Packet4f, ResultAlignment>(res+0, _mm_shuffle_ps(iA,iB,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+res_stride, _mm_shuffle_ps(iA,iB,0x22));
pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
enum {
MatrixAlignment = traits<MatrixType>::Alignment,
ResultAlignment = traits<ResultType>::Alignment,
StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
};
typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
static void run(const MatrixType& mat, ResultType& result)
{
ActualMatrixType matrix(mat);
const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register of the matrix holds two elements, each of the smaller matrices
    // consists of two registers. Hence we get a better locality of the
    // calculations.
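    // The same blockwise identities as in the float specialization above
    // apply; here each 2x2 block spans two __m128d registers (e.g. A = [A1; A2]).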
// the four sub-matrices
__m128d A1, A2, B1, B2, C1, C2, D1, D2;
if(StorageOrdersMatch)
{
A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
}
else
{
__m128d tmp;
A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
tmp = A1;
A1 = _mm_unpacklo_pd(A1,A2);
A2 = _mm_unpackhi_pd(tmp,A2);
tmp = C1;
C1 = _mm_unpacklo_pd(C1,C2);
C2 = _mm_unpackhi_pd(tmp,C2);
B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
tmp = B1;
B1 = _mm_unpacklo_pd(B1,B2);
B2 = _mm_unpackhi_pd(tmp,B2);
tmp = D1;
D1 = _mm_unpacklo_pd(D1,D2);
D2 = _mm_unpackhi_pd(tmp,D2);
}
    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial inverse of the sub-matrices
DC1, DC2, AB1, AB2;
__m128d dA, dB, dC, dD; // determinant of the sub-matrices
__m128d det, d1, d2, rd;
// dA = |A|
dA = _mm_shuffle_pd(A2, A2, 1);
dA = _mm_mul_pd(A1, dA);
dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
// dB = |B|
dB = _mm_shuffle_pd(B2, B2, 1);
dB = _mm_mul_pd(B1, dB);
dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));
// AB = A# * B
AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));
// dC = |C|
dC = _mm_shuffle_pd(C2, C2, 1);
dC = _mm_mul_pd(C1, dC);
dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
// dD = |D|
dD = _mm_shuffle_pd(D2, D2, 1);
dD = _mm_mul_pd(D1, dD);
dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
// DC = D# * C
DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));
// rd = trace(AB*DC) = trace(A#*B*D#*C)
d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
rd = _mm_add_pd(d1, d2);
rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));
// iD = C*A#*B
iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));
// iA = B*D#*C
iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));
// iD = D*|A| - C*A#*B
dA = _mm_shuffle_pd(dA,dA,0);
iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);
// iA = A*|D| - B*D#*C;
dD = _mm_shuffle_pd(dD,dD,0);
iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);
d1 = _mm_mul_sd(dA, dD);
d2 = _mm_mul_sd(dB, dC);
// iB = D * (A#B)# = D*B#*A
iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));
// det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
det = _mm_add_sd(d1, d2);
det = _mm_sub_sd(det, rd);
// iC = A * (D#C)# = A*C#*D
iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));
rd = _mm_div_sd(_mm_set_sd(1.0), det);
// #ifdef ZERO_SINGULAR
// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
// #endif
rd = _mm_shuffle_pd(rd,rd,0);
// iB = C*|B| - D*B#*A
dB = _mm_shuffle_pd(dB,dB,0);
iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);
d1 = _mm_xor_pd(rd, _Sign_PN);
d2 = _mm_xor_pd(rd, _Sign_NP);
// iC = B*|C| - A*C#*D;
dC = _mm_shuffle_pd(dC,dC,0);
iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
Index res_stride = result.outerStride();
double* res = result.data();
pstoret<double, Packet2d, ResultAlignment>(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_INVERSE_SSE_H
| 13,669 | 39.324484 | 143 |
h
|
abess
|
abess-master/python/include/Eigen/src/MetisSupport/MetisSupport.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef METIS_SUPPORT_H
#define METIS_SUPPORT_H
namespace Eigen {
/**
* Get the fill-reducing ordering from the METIS package
*
* If A is the original matrix and Ap is the permuted matrix,
 * the fill-reducing permutation is defined as follows:
* Row (column) i of A is the matperm(i) row (column) of Ap.
* WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm)
*/
template <typename StorageIndex>
class MetisOrdering
{
public:
typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> PermutationType;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
template <typename MatrixType>
void get_symmetrized_graph(const MatrixType& A)
{
Index m = A.cols();
    eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARE MATRICES");
// Get the transpose of the input matrix
MatrixType At = A.transpose();
// Get the number of nonzeros elements in each row/col of At+A
Index TotNz = 0;
IndexVector visited(m);
visited.setConstant(-1);
for (StorageIndex j = 0; j < m; j++)
{
      // Compute the union structure of A(j,:) and At(j,:)
visited(j) = j; // Do not include the diagonal element
// Get the nonzeros in row/column j of A
for (typename MatrixType::InnerIterator it(A, j); it; ++it)
{
Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
if (visited(idx) != j )
{
visited(idx) = j;
++TotNz;
}
}
//Get the nonzeros in row/column j of At
for (typename MatrixType::InnerIterator it(At, j); it; ++it)
{
Index idx = it.index();
if(visited(idx) != j)
{
visited(idx) = j;
++TotNz;
}
}
}
// Reserve place for A + At
m_indexPtr.resize(m+1);
m_innerIndices.resize(TotNz);
// Now compute the real adjacency list of each column/row
visited.setConstant(-1);
StorageIndex CurNz = 0;
for (StorageIndex j = 0; j < m; j++)
{
m_indexPtr(j) = CurNz;
visited(j) = j; // Do not include the diagonal element
// Add the pattern of row/column j of A to A+At
for (typename MatrixType::InnerIterator it(A,j); it; ++it)
{
StorageIndex idx = it.index(); // Get the row index (for column major) or column index (for row major)
if (visited(idx) != j )
{
visited(idx) = j;
m_innerIndices(CurNz) = idx;
CurNz++;
}
}
//Add the pattern of row/column j of At to A+At
for (typename MatrixType::InnerIterator it(At, j); it; ++it)
{
StorageIndex idx = it.index();
if(visited(idx) != j)
{
visited(idx) = j;
m_innerIndices(CurNz) = idx;
++CurNz;
}
}
}
m_indexPtr(m) = CurNz;
}
template <typename MatrixType>
void operator() (const MatrixType& A, PermutationType& matperm)
{
StorageIndex m = internal::convert_index<StorageIndex>(A.cols()); // must be StorageIndex, because it is passed by address to METIS
IndexVector perm(m),iperm(m);
// First, symmetrize the matrix graph.
get_symmetrized_graph(A);
int output_error;
// Call the fill-reducing routine from METIS
output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());
if(output_error != METIS_OK)
{
//FIXME The ordering interface should define a class of possible errors
std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n";
return;
}
// Get the fill-reducing permutation
//NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows
// Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap
matperm.resize(m);
for (int j = 0; j < m; j++)
matperm.indices()(iperm(j)) = j;
}
protected:
  IndexVector m_indexPtr; // Pointer to the adjacency list of each row/column
IndexVector m_innerIndices; // Adjacency list
};
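/* A minimal usage sketch (illustrative; requires Eigen to be configured with
 * the METIS library): plug MetisOrdering into a sparse solver as its
 * fill-reducing ordering.
 *
 *   #include <Eigen/MetisSupport>
 *   Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::MetisOrdering<int> > solver;
 *   solver.compute(A);   // A: square Eigen::SparseMatrix<double>
 */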
} // end namespace Eigen
#endif // METIS_SUPPORT_H
| 4,586 | 32.23913 | 136 |
h
|
abess
|
abess-master/python/include/Eigen/src/OrderingMethods/Amd.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <[email protected]>
/*
NOTE: this routine has been adapted from the CSparse library:
Copyright (c) 2006, Timothy A. Davis.
http://www.suitesparse.com
CSparse is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
CSparse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this Module; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../Core/util/NonMPL2.h"
#ifndef EIGEN_SPARSE_AMD_H
#define EIGEN_SPARSE_AMD_H
namespace Eigen {
namespace internal {
template<typename T> inline T amd_flip(const T& i) { return -i-2; }
template<typename T> inline T amd_unflip(const T& i) { return i<0 ? amd_flip(i) : i; }
template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; }
template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); }
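/* Note: amd_flip is an involution mapping valid indices to values below -1,
 * e.g. amd_flip(0) == -2, amd_flip(3) == -5, and amd_flip(amd_flip(i)) == i,
 * so a flipped entry can always be recovered. */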
/* clear w */
template<typename StorageIndex>
static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
{
StorageIndex k;
if(mark < 2 || (mark + lemax < 0))
{
for(k = 0; k < n; k++)
if(w[k] != 0)
w[k] = 1;
mark = 2;
}
return (mark); /* at this point, w[0..n-1] < mark holds */
}
/* depth-first search and postorder of a tree rooted at node j */
template<typename StorageIndex>
StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
{
StorageIndex i, p, top = 0;
if(!head || !next || !post || !stack) return (-1); /* check inputs */
stack[0] = j; /* place j on the stack */
while (top >= 0) /* while (stack is not empty) */
{
p = stack[top]; /* p = top of stack */
i = head[p]; /* i = youngest child of p */
if(i == -1)
{
top--; /* p has no unordered children left */
post[k++] = p; /* node p is the kth postordered node */
}
else
{
head[p] = next[i]; /* remove i from children of p */
stack[++top] = i; /* start dfs on child node i */
}
}
return k;
}
/** \internal
* \ingroup OrderingMethods_Module
* Approximate minimum degree ordering algorithm.
*
* \param[in] C the input selfadjoint matrix stored in compressed column major format.
* \param[out] perm the permutation P reducing the fill-in of the input matrix \a C
*
  * Note that the input matrix \a C must be complete, that is, both the upper and lower parts have to be stored, as well as the diagonal entries.
* On exit the values of C are destroyed */
template<typename Scalar, typename StorageIndex>
void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
{
using std::sqrt;
StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
StorageIndex n = StorageIndex(C.cols());
dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */
dense = (std::min)(n-2, dense);
StorageIndex cnz = StorageIndex(C.nonZeros());
perm.resize(n+1);
t = cnz + cnz/5 + 2*n; /* add elbow room to C */
C.resizeNonZeros(t);
// get workspace
ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
StorageIndex* len = W;
StorageIndex* nv = W + (n+1);
StorageIndex* next = W + 2*(n+1);
StorageIndex* head = W + 3*(n+1);
StorageIndex* elen = W + 4*(n+1);
StorageIndex* degree = W + 5*(n+1);
StorageIndex* w = W + 6*(n+1);
StorageIndex* hhead = W + 7*(n+1);
StorageIndex* last = perm.indices().data(); /* use P as workspace for last */
/* --- Initialize quotient graph ---------------------------------------- */
StorageIndex* Cp = C.outerIndexPtr();
StorageIndex* Ci = C.innerIndexPtr();
for(k = 0; k < n; k++)
len[k] = Cp[k+1] - Cp[k];
len[n] = 0;
nzmax = t;
for(i = 0; i <= n; i++)
{
head[i] = -1; // degree list i is empty
last[i] = -1;
next[i] = -1;
hhead[i] = -1; // hash list i is empty
nv[i] = 1; // node i is just one node
w[i] = 1; // node i is alive
elen[i] = 0; // Ek of node i is empty
degree[i] = len[i]; // degree of node i
}
mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */
/* --- Initialize degree lists ------------------------------------------ */
for(i = 0; i < n; i++)
{
bool has_diag = false;
for(p = Cp[i]; p<Cp[i+1]; ++p)
if(Ci[p]==i)
{
has_diag = true;
break;
}
d = degree[i];
if(d == 1 && has_diag) /* node i is empty */
{
elen[i] = -2; /* element i is dead */
nel++;
Cp[i] = -1; /* i is a root of assembly tree */
w[i] = 0;
}
else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
{
nv[i] = 0; /* absorb i into element n */
elen[i] = -1; /* node i is dead */
nel++;
Cp[i] = amd_flip (n);
nv[n]++;
}
else
{
if(head[d] != -1) last[head[d]] = i;
next[i] = head[d]; /* put node i in degree list d */
head[d] = i;
}
}
elen[n] = -2; /* n is a dead element */
Cp[n] = -1; /* n is a root of assembly tree */
w[n] = 0; /* n is a dead element */
while (nel < n) /* while (selecting pivots) do */
{
/* --- Select node of minimum approximate degree -------------------- */
for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
if(next[k] != -1) last[next[k]] = -1;
head[mindeg] = next[k]; /* remove k from degree list */
elenk = elen[k]; /* elenk = |Ek| */
nvk = nv[k]; /* # of nodes k represents */
nel += nvk; /* nv[k] nodes of A eliminated */
/* --- Garbage collection ------------------------------------------- */
if(elenk > 0 && cnz + mindeg >= nzmax)
{
for(j = 0; j < n; j++)
{
if((p = Cp[j]) >= 0) /* j is a live node or element */
{
Cp[j] = Ci[p]; /* save first entry of object */
Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */
}
}
for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
{
if((j = amd_flip (Ci[p++])) >= 0) /* found object j */
{
Ci[q] = Cp[j]; /* restore first entry of object */
Cp[j] = q++; /* new pointer to object j */
for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
}
}
cnz = q; /* Ci[cnz...nzmax-1] now free */
}
/* --- Construct new element ---------------------------------------- */
dk = 0;
nv[k] = -nvk; /* flag k as in Lk */
p = Cp[k];
pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */
pk2 = pk1;
for(k1 = 1; k1 <= elenk + 1; k1++)
{
if(k1 > elenk)
{
e = k; /* search the nodes in k */
pj = p; /* list of nodes starts at Ci[pj]*/
ln = len[k] - elenk; /* length of list of nodes in k */
}
else
{
e = Ci[p++]; /* search the nodes in e */
pj = Cp[e];
ln = len[e]; /* length of list of nodes in e */
}
for(k2 = 1; k2 <= ln; k2++)
{
i = Ci[pj++];
if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
dk += nvi; /* degree[Lk] += size of node i */
nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/
Ci[pk2++] = i; /* place i in Lk */
if(next[i] != -1) last[next[i]] = last[i];
if(last[i] != -1) /* remove i from degree list */
{
next[last[i]] = next[i];
}
else
{
head[degree[i]] = next[i];
}
}
if(e != k)
{
Cp[e] = amd_flip (k); /* absorb e into k */
w[e] = 0; /* e is now a dead element */
}
}
if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */
degree[k] = dk; /* external degree of k - |Lk\i| */
Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */
len[k] = pk2 - pk1;
elen[k] = -2; /* k is now an element */
/* --- Find set differences ----------------------------------------- */
mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */
for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
{
i = Ci[pk];
if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
nvi = -nv[i]; /* nv[i] was negated */
wnvi = mark - nvi;
for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */
{
e = Ci[p];
if(w[e] >= mark)
{
w[e] -= nvi; /* decrement |Le\Lk| */
}
else if(w[e] != 0) /* ensure e is a live element */
{
w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
}
}
}
/* --- Degree update ------------------------------------------------ */
for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */
{
i = Ci[pk]; /* consider node i in Lk */
p1 = Cp[i];
p2 = p1 + elen[i] - 1;
pn = p1;
for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */
{
e = Ci[p];
if(w[e] != 0) /* e is an unabsorbed element */
{
dext = w[e] - mark; /* dext = |Le\Lk| */
if(dext > 0)
{
d += dext; /* sum up the set differences */
Ci[pn++] = e; /* keep e in Ei */
h += e; /* compute the hash of node i */
}
else
{
Cp[e] = amd_flip (k); /* aggressive absorb. e->k */
w[e] = 0; /* e is a dead element */
}
}
}
elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */
p3 = pn;
p4 = p1 + len[i];
for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
{
j = Ci[p];
if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
d += nvj; /* degree(i) += |j| */
Ci[pn++] = j; /* place j in node list of i */
h += j; /* compute hash for node i */
}
if(d == 0) /* check for mass elimination */
{
Cp[i] = amd_flip (k); /* absorb i into k */
nvi = -nv[i];
dk -= nvi; /* |Lk| -= |i| */
nvk += nvi; /* |k| += nv[i] */
nel += nvi;
nv[i] = 0;
elen[i] = -1; /* node i is dead */
}
else
{
degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */
Ci[pn] = Ci[p3]; /* move first node to end */
Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
          Ci[p1] = k;                   /* add k as 1st element of Ei */
len[i] = pn - p1 + 1; /* new len of adj. list of node i */
h %= n; /* finalize hash of i */
next[i] = hhead[h]; /* place i in hash bucket */
hhead[h] = i;
last[i] = h; /* save hash of i in last[i] */
}
} /* scan2 is done */
degree[k] = dk; /* finalize |Lk| */
lemax = std::max<StorageIndex>(lemax, dk);
mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */
/* --- Supernode detection ------------------------------------------ */
for(pk = pk1; pk < pk2; pk++)
{
i = Ci[pk];
if(nv[i] >= 0) continue; /* skip if i is dead */
h = last[i]; /* scan hash bucket of node i */
i = hhead[h];
hhead[h] = -1; /* hash bucket will be empty */
for(; i != -1 && next[i] != -1; i = next[i], mark++)
{
ln = len[i];
eln = elen[i];
for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
jlast = i;
for(j = next[i]; j != -1; ) /* compare i with all j */
{
ok = (len[j] == ln) && (elen[j] == eln);
for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
{
if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/
}
if(ok) /* i and j are identical */
{
Cp[j] = amd_flip (i); /* absorb j into i */
nv[i] += nv[j];
nv[j] = 0;
elen[j] = -1; /* node j is dead */
j = next[j]; /* delete j from hash bucket */
next[jlast] = j;
}
else
{
jlast = j; /* j and i are different */
j = next[j];
}
}
}
}
/* --- Finalize new element------------------------------------------ */
for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */
{
i = Ci[pk];
if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
nv[i] = nvi; /* restore nv[i] */
d = degree[i] + dk - nvi; /* compute external degree(i) */
d = std::min<StorageIndex> (d, n - nel - nvi);
if(head[d] != -1) last[head[d]] = i;
next[i] = head[d]; /* put i back in degree list */
last[i] = -1;
head[d] = i;
mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */
degree[i] = d;
Ci[p++] = i; /* place i in Lk */
}
nv[k] = nvk; /* # nodes absorbed into k */
if((len[k] = p-pk1) == 0) /* length of adj list of element k*/
{
Cp[k] = -1; /* k is a root of the tree */
w[k] = 0; /* k is now a dead element */
}
if(elenk != 0) cnz = p; /* free unused space in Lk */
}
/* --- Postordering ----------------------------------------------------- */
for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */
for(j = 0; j <= n; j++) head[j] = -1;
for(j = n; j >= 0; j--) /* place unordered nodes in lists */
{
if(nv[j] > 0) continue; /* skip if j is an element */
next[j] = head[Cp[j]]; /* place j in list of its parent */
head[Cp[j]] = j;
}
for(e = n; e >= 0; e--) /* place elements in lists */
{
if(nv[e] <= 0) continue; /* skip unless e is an element */
if(Cp[e] != -1)
{
next[e] = head[Cp[e]]; /* place e in list of its parent */
head[Cp[e]] = e;
}
}
for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
{
if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
}
perm.indices().conservativeResize(n);
}
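/* A minimal usage sketch (illustrative): the input must carry the full
 * selfadjoint pattern, diagonal included, and is destroyed by the call.
 *
 *   SparseMatrix<double,ColMajor,int> C = ...; // e.g. the (A + A^T) pattern with explicit diagonal
 *   PermutationMatrix<Dynamic,Dynamic,int> perm;
 *   internal::minimum_degree_ordering(C, perm);
 */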
} // namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_AMD_H
| 16,396 | 35.764574 | 144 |
h
|
abess
|
abess-master/python/include/Eigen/src/OrderingMethods/Eigen_Colamd.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire Nuentsa Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is modified from the colamd/symamd library. The copyright is below
// The authors of the code itself are Stefan I. Larimore and Timothy A.
// Davis ([email protected]), University of Florida. The algorithm was
// developed in collaboration with John Gilbert, Xerox PARC, and Esmond
// Ng, Oak Ridge National Laboratory.
//
// Date:
//
// September 8, 2003. Version 2.3.
//
// Acknowledgements:
//
// This work was supported by the National Science Foundation, under
// grants DMS-9504974 and DMS-9803599.
//
// Notice:
//
// Copyright (c) 1998-2003 by the University of Florida.
// All Rights Reserved.
//
// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
//
// Permission is hereby granted to use, copy, modify, and/or distribute
// this program, provided that the Copyright, this License, and the
// Availability of the original version is retained on all copies and made
// accessible to the end-user of any code or package that includes COLAMD
// or any modified version of COLAMD.
//
// Availability:
//
// The colamd/symamd library is available at
//
// http://www.suitesparse.com
#ifndef EIGEN_COLAMD_H
#define EIGEN_COLAMD_H
namespace internal {
/* Ensure that debugging is turned off: */
#ifndef COLAMD_NDEBUG
#define COLAMD_NDEBUG
#endif /* COLAMD_NDEBUG */
/* ========================================================================== */
/* === Knob and statistics definitions ====================================== */
/* ========================================================================== */
/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */
#define COLAMD_KNOBS 20
/* number of output statistics. Only stats [0..6] are currently used. */
#define COLAMD_STATS 20
/* knobs [0] and stats [0]: dense row knob and output statistic. */
#define COLAMD_DENSE_ROW 0
/* knobs [1] and stats [1]: dense column knob and output statistic. */
#define COLAMD_DENSE_COL 1
/* stats [2]: memory defragmentation count output statistic */
#define COLAMD_DEFRAG_COUNT 2
/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */
#define COLAMD_STATUS 3
/* stats [4..6]: error info, or info on jumbled columns */
#define COLAMD_INFO1 4
#define COLAMD_INFO2 5
#define COLAMD_INFO3 6
/* error codes returned in stats [3]: */
#define COLAMD_OK (0)
#define COLAMD_OK_BUT_JUMBLED (1)
#define COLAMD_ERROR_A_not_present (-1)
#define COLAMD_ERROR_p_not_present (-2)
#define COLAMD_ERROR_nrow_negative (-3)
#define COLAMD_ERROR_ncol_negative (-4)
#define COLAMD_ERROR_nnz_negative (-5)
#define COLAMD_ERROR_p0_nonzero (-6)
#define COLAMD_ERROR_A_too_small (-7)
#define COLAMD_ERROR_col_length_negative (-8)
#define COLAMD_ERROR_row_index_out_of_bounds (-9)
#define COLAMD_ERROR_out_of_memory (-10)
#define COLAMD_ERROR_internal_error (-999)
/* ========================================================================== */
/* === Definitions ========================================================== */
/* ========================================================================== */
#define ONES_COMPLEMENT(r) (-(r)-1)
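/* e.g. ONES_COMPLEMENT(0) == -1 and ONES_COMPLEMENT(-1) == 0; the macro is its
 * own inverse, which lets an index be stashed in negated form and recovered. */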
/* -------------------------------------------------------------------------- */
#define COLAMD_EMPTY (-1)
/* Row and column status */
#define ALIVE (0)
#define DEAD (-1)
/* Column status */
#define DEAD_PRINCIPAL (-1)
#define DEAD_NON_PRINCIPAL (-2)
/* Macros for row and column status update and checking. */
#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE)
#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE)
#define COL_IS_DEAD(c) (Col [c].start < ALIVE)
#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE)
#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL)
#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; }
#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; }
#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
/* ========================================================================== */
/* === Colamd reporting mechanism =========================================== */
/* ========================================================================== */
// == Row and Column structures ==
template <typename IndexType>
struct colamd_col
{
IndexType start ; /* index for A of first row in this column, or DEAD */
/* if column is dead */
IndexType length ; /* number of rows in this column */
union
{
IndexType thickness ; /* number of original columns represented by this */
/* col, if the column is alive */
IndexType parent ; /* parent in parent tree super-column structure, if */
/* the column is dead */
} shared1 ;
union
{
IndexType score ; /* the score used to maintain heap, if col is alive */
IndexType order ; /* pivot ordering of this column, if col is dead */
} shared2 ;
union
{
IndexType headhash ; /* head of a hash bucket, if col is at the head of */
/* a degree list */
IndexType hash ; /* hash value, if col is not in a degree list */
IndexType prev ; /* previous column in degree list, if col is in a */
/* degree list (but not at the head of a degree list) */
} shared3 ;
union
{
IndexType degree_next ; /* next column, if col is in a degree list */
IndexType hash_next ; /* next column, if col is in a hash list */
} shared4 ;
};
template <typename IndexType>
struct Colamd_Row
{
IndexType start ; /* index for A of first col in this row */
IndexType length ; /* number of principal columns in this row */
union
{
IndexType degree ; /* number of principal & non-principal columns in row */
IndexType p ; /* used as a row pointer in init_rows_cols () */
} shared1 ;
union
{
IndexType mark ; /* for computing set differences and marking dead rows*/
IndexType first_column ;/* first column in row (used in garbage collection) */
} shared2 ;
};
/* ========================================================================== */
/* === Colamd recommended memory size ======================================= */
/* ========================================================================== */
/*
The recommended length Alen of the array A passed to colamd is given by
the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any
argument is negative. 2*nnz space is required for the row and column
indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is
required for the Col and Row arrays, respectively, which are internal to
colamd. An additional n_col space is the minimal amount of "elbow room",
and nnz/5 more space is recommended for run time efficiency.
This macro is not needed when using symamd.
Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid
gcc -pedantic warning messages.
*/
template <typename IndexType>
inline IndexType colamd_c(IndexType n_col)
{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }
template <typename IndexType>
inline IndexType colamd_r(IndexType n_row)
{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }
// Prototypes of non-user callable routines
template <typename IndexType>
static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] );
template <typename IndexType>
static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
template <typename IndexType>
static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
template <typename IndexType>
static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);
template <typename IndexType>
static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
template <typename IndexType>
static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;
template <typename IndexType>
static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;
/* === No debugging ========================================================= */
#define COLAMD_DEBUG0(params) ;
#define COLAMD_DEBUG1(params) ;
#define COLAMD_DEBUG2(params) ;
#define COLAMD_DEBUG3(params) ;
#define COLAMD_DEBUG4(params) ;
#define COLAMD_ASSERT(expression) ((void) 0)
/**
* \brief Returns the recommended value of Alen
*
* Returns recommended value of Alen for use by colamd.
* Returns -1 if any input argument is negative.
 * The use of this routine or macro is optional.
 * Note that the original COLAMD_RECOMMENDED macro evaluates its arguments more
 * than once, so be careful of side effects if you pass expressions as arguments
 * to it; the inline function version here does not have that pitfall.
*
* \param nnz nonzeros in A
* \param n_row number of rows in A
* \param n_col number of columns in A
* \return recommended value of Alen for use by colamd
*/
template <typename IndexType>
inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
{
if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
return (-1);
else
return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5));
}
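/* Worked example (a sketch; the struct sizes below assume the usual layout
 * where colamd_col<int> packs into 6 ints and Colamd_Row<int> into 4 ints):
 * for nnz = 1000 and n_row = n_col = 100,
 *   Alen >= 2*1000 + colamd_c(100) + colamd_r(100) + 100 + 1000/5
 *        =  2000  +   101*6       +   101*4       + 100 + 200   = 3310 entries. */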
/**
* \brief set default parameters The use of this routine is optional.
*
* Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
* entries are removed prior to ordering. Columns with more than
* (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
* ordering, and placed last in the output column ordering.
*
* COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
* respectively, in colamd.h. Default values of these two knobs
* are both 0.5. Currently, only knobs [0] and knobs [1] are
* used, but future versions may use more knobs. If so, they will
* be properly set to their defaults by the future version of
* colamd_set_defaults, so that the code that calls colamd will
* not need to change, assuming that you either use
* colamd_set_defaults, or pass a (double *) NULL pointer as the
* knobs array to colamd or symamd.
*
* \param knobs parameter settings for colamd
*/
static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
{
/* === Local variables ================================================== */
int i ;
if (!knobs)
{
return ; /* no knobs to initialize */
}
for (i = 0 ; i < COLAMD_KNOBS ; i++)
{
knobs [i] = 0 ;
}
knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */
knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */
}
/**
* \brief Computes a column ordering using the column approximate minimum degree ordering
*
* Computes a column ordering (Q) of A such that P(AQ)=LU or
* (AQ)'AQ=LL' have less fill-in and require fewer floating point
* operations than factorizing the unpermuted matrix A or A'A,
* respectively.
*
*
* \param n_row number of rows in A
* \param n_col number of columns in A
 * \param Alen size of the array A
 * \param A row indices of the matrix, of size Alen
* \param p column pointers of A, of size n_col+1
* \param knobs parameter settings for colamd
* \param stats colamd output statistics and error codes
*/
template <typename IndexType>
static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
{
/* === Local variables ================================================== */
IndexType i ; /* loop index */
IndexType nnz ; /* nonzeros in A */
IndexType Row_size ; /* size of Row [], in integers */
IndexType Col_size ; /* size of Col [], in integers */
IndexType need ; /* minimum required length of A */
Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
IndexType n_col2 ; /* number of non-dense, non-empty columns */
IndexType n_row2 ; /* number of non-dense, non-empty rows */
IndexType ngarbage ; /* number of garbage collections performed */
IndexType max_deg ; /* maximum row degree */
double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
/* === Check the input arguments ======================================== */
if (!stats)
{
COLAMD_DEBUG0 (("colamd: stats not present\n")) ;
return (false) ;
}
for (i = 0 ; i < COLAMD_STATS ; i++)
{
stats [i] = 0 ;
}
stats [COLAMD_STATUS] = COLAMD_OK ;
stats [COLAMD_INFO1] = -1 ;
stats [COLAMD_INFO2] = -1 ;
if (!A) /* A is not present */
{
stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
COLAMD_DEBUG0 (("colamd: A not present\n")) ;
return (false) ;
}
if (!p) /* p is not present */
{
stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
COLAMD_DEBUG0 (("colamd: p not present\n")) ;
return (false) ;
}
if (n_row < 0) /* n_row must be >= 0 */
{
stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
stats [COLAMD_INFO1] = n_row ;
COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
return (false) ;
}
if (n_col < 0) /* n_col must be >= 0 */
{
stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
stats [COLAMD_INFO1] = n_col ;
COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
return (false) ;
}
nnz = p [n_col] ;
if (nnz < 0) /* nnz must be >= 0 */
{
stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
stats [COLAMD_INFO1] = nnz ;
COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
return (false) ;
}
if (p [0] != 0)
{
stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
stats [COLAMD_INFO1] = p [0] ;
COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
return (false) ;
}
/* === If no knobs, set default knobs =================================== */
if (!knobs)
{
colamd_set_defaults (default_knobs) ;
knobs = default_knobs ;
}
/* === Allocate the Row and Col arrays from array A ===================== */
Col_size = colamd_c (n_col) ;
Row_size = colamd_r (n_row) ;
need = 2*nnz + n_col + Col_size + Row_size ;
if (need > Alen)
{
/* not enough space in array A to perform the ordering */
stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
stats [COLAMD_INFO1] = need ;
stats [COLAMD_INFO2] = Alen ;
COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
return (false) ;
}
Alen -= Col_size + Row_size ;
Col = (colamd_col<IndexType> *) &A [Alen] ;
Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;
/* === Construct the row and column data structures ===================== */
if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
{
/* input matrix is invalid */
COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ;
return (false) ;
}
/* === Initialize scores, kill dense rows/columns ======================= */
Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
&n_row2, &n_col2, &max_deg) ;
/* === Order the supercolumns =========================================== */
ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
n_col2, max_deg, 2*nnz) ;
/* === Order the non-principal columns ================================== */
Eigen::internal::order_children (n_col, Col, p) ;
/* === Return statistics in stats ======================================= */
stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
COLAMD_DEBUG0 (("colamd: done.\n")) ;
return (true) ;
}
/* ========================================================================== */
/* === NON-USER-CALLABLE ROUTINES: ========================================== */
/* ========================================================================== */
/* There are no user-callable routines beyond this point in the file */
/* ========================================================================== */
/* === init_rows_cols ======================================================= */
/* ========================================================================== */
/*
Takes the column form of the matrix in A and creates the row form of the
matrix. Also, row and column attributes are stored in the Col and Row
structs. If the columns are un-sorted or contain duplicate row indices,
this routine will also sort and remove duplicate row indices from the
column form of the matrix. Returns false if the matrix is invalid,
true otherwise. Not user-callable.
*/
template <typename IndexType>
static IndexType init_rows_cols /* returns true if OK, or false otherwise */
(
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A, of size Alen */
IndexType p [], /* pointers to columns in A, of size n_col+1 */
IndexType stats [COLAMD_STATS] /* colamd statistics */
)
{
/* === Local variables ================================================== */
IndexType col ; /* a column index */
IndexType row ; /* a row index */
IndexType *cp ; /* a column pointer */
IndexType *cp_end ; /* a pointer to the end of a column */
IndexType *rp ; /* a row pointer */
IndexType *rp_end ; /* a pointer to the end of a row */
IndexType last_row ; /* previous row */
/* === Initialize columns, and check column pointers ==================== */
for (col = 0 ; col < n_col ; col++)
{
Col [col].start = p [col] ;
Col [col].length = p [col+1] - p [col] ;
if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200
{
/* column pointers must be non-decreasing */
stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
stats [COLAMD_INFO1] = col ;
stats [COLAMD_INFO2] = Col [col].length ;
COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
return (false) ;
}
Col [col].shared1.thickness = 1 ;
Col [col].shared2.score = 0 ;
Col [col].shared3.prev = COLAMD_EMPTY ;
Col [col].shared4.degree_next = COLAMD_EMPTY ;
}
/* p [0..n_col] no longer needed, used as "head" in subsequent routines */
/* === Scan columns, compute row degrees, and check row indices ========= */
stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/
for (row = 0 ; row < n_row ; row++)
{
Row [row].length = 0 ;
Row [row].shared2.mark = -1 ;
}
for (col = 0 ; col < n_col ; col++)
{
last_row = -1 ;
cp = &A [p [col]] ;
cp_end = &A [p [col+1]] ;
while (cp < cp_end)
{
row = *cp++ ;
/* make sure row indices within range */
if (row < 0 || row >= n_row)
{
stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
stats [COLAMD_INFO1] = col ;
stats [COLAMD_INFO2] = row ;
stats [COLAMD_INFO3] = n_row ;
COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
return (false) ;
}
if (row <= last_row || Row [row].shared2.mark == col)
{
        /* row indices are unsorted or repeated (or both), thus col */
/* is jumbled. This is a notice, not an error condition. */
stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
stats [COLAMD_INFO1] = col ;
stats [COLAMD_INFO2] = row ;
(stats [COLAMD_INFO3]) ++ ;
COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
}
if (Row [row].shared2.mark != col)
{
Row [row].length++ ;
}
else
{
/* this is a repeated entry in the column, */
/* it will be removed */
Col [col].length-- ;
}
/* mark the row as having been seen in this column */
Row [row].shared2.mark = col ;
last_row = row ;
}
}
/* === Compute row pointers ============================================= */
/* row form of the matrix starts directly after the column */
/* form of matrix in A */
Row [0].start = p [n_col] ;
Row [0].shared1.p = Row [0].start ;
Row [0].shared2.mark = -1 ;
for (row = 1 ; row < n_row ; row++)
{
Row [row].start = Row [row-1].start + Row [row-1].length ;
Row [row].shared1.p = Row [row].start ;
Row [row].shared2.mark = -1 ;
}
/* === Create row form ================================================== */
if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
{
/* if cols jumbled, watch for repeated row indices */
for (col = 0 ; col < n_col ; col++)
{
cp = &A [p [col]] ;
cp_end = &A [p [col+1]] ;
while (cp < cp_end)
{
row = *cp++ ;
if (Row [row].shared2.mark != col)
{
A [(Row [row].shared1.p)++] = col ;
Row [row].shared2.mark = col ;
}
}
}
}
else
{
/* if cols not jumbled, we don't need the mark (this is faster) */
for (col = 0 ; col < n_col ; col++)
{
cp = &A [p [col]] ;
cp_end = &A [p [col+1]] ;
while (cp < cp_end)
{
A [(Row [*cp++].shared1.p)++] = col ;
}
}
}
/* === Clear the row marks and set row degrees ========================== */
for (row = 0 ; row < n_row ; row++)
{
Row [row].shared2.mark = 0 ;
Row [row].shared1.degree = Row [row].length ;
}
/* === See if we need to re-create columns ============================== */
if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
{
COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
/* === Compute col pointers ========================================= */
/* col form of the matrix starts at A [0]. */
/* Note, we may have a gap between the col form and the row */
/* form if there were duplicate entries, if so, it will be */
/* removed upon the first garbage collection */
Col [0].start = 0 ;
p [0] = Col [0].start ;
for (col = 1 ; col < n_col ; col++)
{
/* note that the lengths here are for pruned columns, i.e. */
/* no duplicate row indices will exist for these columns */
Col [col].start = Col [col-1].start + Col [col-1].length ;
p [col] = Col [col].start ;
}
/* === Re-create col form =========================================== */
for (row = 0 ; row < n_row ; row++)
{
rp = &A [Row [row].start] ;
rp_end = rp + Row [row].length ;
while (rp < rp_end)
{
A [(p [*rp++])++] = row ;
}
}
}
/* === Done. Matrix is not (or no longer) jumbled ====================== */
return (true) ;
}
/* ========================================================================== */
/* === init_scoring ========================================================= */
/* ========================================================================== */
/*
Kills dense or empty columns and rows, calculates an initial score for
each column, and places all columns in the degree lists. Not user-callable.
*/
template <typename IndexType>
static void init_scoring
(
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
double knobs [COLAMD_KNOBS],/* parameters */
IndexType *p_n_row2, /* number of non-dense, non-empty rows */
IndexType *p_n_col2, /* number of non-dense, non-empty columns */
IndexType *p_max_deg /* maximum row degree */
)
{
/* === Local variables ================================================== */
IndexType c ; /* a column index */
IndexType r, row ; /* a row index */
IndexType *cp ; /* a column pointer */
IndexType deg ; /* degree of a row or column */
IndexType *cp_end ; /* a pointer to the end of a column */
IndexType *new_cp ; /* new column pointer */
IndexType col_length ; /* length of pruned column */
IndexType score ; /* current column score */
IndexType n_col2 ; /* number of non-dense, non-empty columns */
IndexType n_row2 ; /* number of non-dense, non-empty rows */
IndexType dense_row_count ; /* remove rows with more entries than this */
IndexType dense_col_count ; /* remove cols with more entries than this */
IndexType min_score ; /* smallest column score */
IndexType max_deg ; /* maximum row degree */
IndexType next_col ; /* Used to add to degree list.*/
/* === Extract knobs ==================================================== */
dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;
dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;
COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
max_deg = 0 ;
n_col2 = n_col ;
n_row2 = n_row ;
/* === Kill empty columns =============================================== */
/* Put the empty columns at the end in their natural order, so that LU */
/* factorization can proceed as far as possible. */
for (c = n_col-1 ; c >= 0 ; c--)
{
deg = Col [c].length ;
if (deg == 0)
{
      /* this is an empty column, kill and order it last */
Col [c].shared2.order = --n_col2 ;
KILL_PRINCIPAL_COL (c) ;
}
}
COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
/* === Kill dense columns =============================================== */
/* Put the dense columns at the end, in their natural order */
for (c = n_col-1 ; c >= 0 ; c--)
{
/* skip any dead columns */
if (COL_IS_DEAD (c))
{
continue ;
}
deg = Col [c].length ;
if (deg > dense_col_count)
{
/* this is a dense column, kill and order it last */
Col [c].shared2.order = --n_col2 ;
/* decrement the row degrees */
cp = &A [Col [c].start] ;
cp_end = cp + Col [c].length ;
while (cp < cp_end)
{
Row [*cp++].shared1.degree-- ;
}
KILL_PRINCIPAL_COL (c) ;
}
}
COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
/* === Kill dense and empty rows ======================================== */
for (r = 0 ; r < n_row ; r++)
{
deg = Row [r].shared1.degree ;
COLAMD_ASSERT (deg >= 0 && deg <= n_col) ;
if (deg > dense_row_count || deg == 0)
{
/* kill a dense or empty row */
KILL_ROW (r) ;
--n_row2 ;
}
else
{
/* keep track of max degree of remaining rows */
max_deg = numext::maxi(max_deg, deg) ;
}
}
COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
/* === Compute initial column scores ==================================== */
/* At this point the row degrees are accurate. They reflect the number */
/* of "live" (non-dense) columns in each row. No empty rows exist. */
/* Some "live" columns may contain only dead rows, however. These are */
/* pruned in the code below. */
/* now find the initial matlab score for each column */
for (c = n_col-1 ; c >= 0 ; c--)
{
/* skip dead column */
if (COL_IS_DEAD (c))
{
continue ;
}
score = 0 ;
cp = &A [Col [c].start] ;
new_cp = cp ;
cp_end = cp + Col [c].length ;
while (cp < cp_end)
{
/* get a row */
row = *cp++ ;
/* skip if dead */
if (ROW_IS_DEAD (row))
{
continue ;
}
/* compact the column */
*new_cp++ = row ;
/* add row's external degree */
score += Row [row].shared1.degree - 1 ;
/* guard against integer overflow */
score = numext::mini(score, n_col) ;
}
/* determine pruned column length */
col_length = (IndexType) (new_cp - &A [Col [c].start]) ;
if (col_length == 0)
{
/* a newly-made null column (all rows in this col are "dense" */
/* and have already been killed) */
COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ;
Col [c].shared2.order = --n_col2 ;
KILL_PRINCIPAL_COL (c) ;
}
else
{
/* set column length and set score */
COLAMD_ASSERT (score >= 0) ;
COLAMD_ASSERT (score <= n_col) ;
Col [c].length = col_length ;
Col [c].shared2.score = score ;
}
}
COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n",
n_col-n_col2)) ;
/* At this point, all empty rows and columns are dead. All live columns */
/* are "clean" (containing no dead rows) and simplicial (no supercolumns */
/* yet). Rows may contain dead columns, but all live rows contain at */
/* least one live column. */
/* === Initialize degree lists ========================================== */
/* clear the hash buckets */
for (c = 0 ; c <= n_col ; c++)
{
head [c] = COLAMD_EMPTY ;
}
min_score = n_col ;
/* place in reverse order, so low column indices are at the front */
/* of the lists. This is to encourage natural tie-breaking */
for (c = n_col-1 ; c >= 0 ; c--)
{
/* only add principal columns to degree lists */
if (COL_IS_ALIVE (c))
{
COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n",
c, Col [c].shared2.score, min_score, n_col)) ;
/* === Add columns score to DList =============================== */
score = Col [c].shared2.score ;
COLAMD_ASSERT (min_score >= 0) ;
COLAMD_ASSERT (min_score <= n_col) ;
COLAMD_ASSERT (score >= 0) ;
COLAMD_ASSERT (score <= n_col) ;
COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;
/* now add this column to dList at proper score location */
next_col = head [score] ;
Col [c].shared3.prev = COLAMD_EMPTY ;
Col [c].shared4.degree_next = next_col ;
/* if there already was a column with the same score, set its */
/* previous pointer to this new column */
if (next_col != COLAMD_EMPTY)
{
Col [next_col].shared3.prev = c ;
}
head [score] = c ;
/* see if this score is less than current min */
min_score = numext::mini(min_score, score) ;
}
}
/* === Return number of remaining columns, and max row degree =========== */
*p_n_col2 = n_col2 ;
*p_n_row2 = n_row2 ;
*p_max_deg = max_deg ;
}
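/* --- Illustrative sketch (not part of the library) ----------------------- */
/* How the dense thresholds above behave. Assuming the default knob value
   of 0.5 installed by colamd_set_defaults (an assumption; see the knob
   section earlier in this file), with n_col = 1000:

     dense_row_count = max(0, min(IndexType(0.5 * 1000), 1000)) = 500

   so any row with more than 500 live entries is killed before scoring,
   and columns are thresholded the same way via knobs [COLAMD_DENSE_COL]. */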
/* ========================================================================== */
/* === find_ordering ======================================================== */
/* ========================================================================== */
/*
Order the principal columns of the supercolumn form of the matrix
  (no supercolumns on input). Uses an approximate column minimum
  degree ordering method. Not user-callable.
*/
template <typename IndexType>
static IndexType find_ordering /* return the number of garbage collections */
(
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
IndexType Alen, /* size of A, 2*nnz + n_col or larger */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
IndexType n_col2, /* Remaining columns to order */
IndexType max_deg, /* Maximum row degree */
IndexType pfree /* index of first free slot (2*nnz on entry) */
)
{
/* === Local variables ================================================== */
IndexType k ; /* current pivot ordering step */
IndexType pivot_col ; /* current pivot column */
IndexType *cp ; /* a column pointer */
IndexType *rp ; /* a row pointer */
IndexType pivot_row ; /* current pivot row */
IndexType *new_cp ; /* modified column pointer */
IndexType *new_rp ; /* modified row pointer */
IndexType pivot_row_start ; /* pointer to start of pivot row */
IndexType pivot_row_degree ; /* number of columns in pivot row */
IndexType pivot_row_length ; /* number of supercolumns in pivot row */
IndexType pivot_col_score ; /* score of pivot column */
IndexType needed_memory ; /* free space needed for pivot row */
IndexType *cp_end ; /* pointer to the end of a column */
IndexType *rp_end ; /* pointer to the end of a row */
IndexType row ; /* a row index */
IndexType col ; /* a column index */
IndexType max_score ; /* maximum possible score */
IndexType cur_score ; /* score of current column */
unsigned int hash ; /* hash value for supernode detection */
IndexType head_column ; /* head of hash bucket */
IndexType first_col ; /* first column in hash bucket */
IndexType tag_mark ; /* marker value for mark array */
IndexType row_mark ; /* Row [row].shared2.mark */
IndexType set_difference ; /* set difference size of row with pivot row */
IndexType min_score ; /* smallest column score */
IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */
IndexType max_mark ; /* maximum value of tag_mark */
IndexType pivot_col_thickness ; /* number of columns represented by pivot col */
IndexType prev_col ; /* Used by Dlist operations. */
IndexType next_col ; /* Used by Dlist operations. */
IndexType ngarbage ; /* number of garbage collections performed */
/* === Initialization and clear mark ==================================== */
max_mark = INT_MAX - n_col ; /* INT_MAX defined in <limits.h> */
tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
min_score = 0 ;
ngarbage = 0 ;
COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
/* === Order the columns ================================================ */
for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)
{
/* === Select pivot column, and order it ============================ */
/* make sure degree list isn't empty */
COLAMD_ASSERT (min_score >= 0) ;
COLAMD_ASSERT (min_score <= n_col) ;
COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
/* get pivot column from head of minimum degree list */
while (min_score < n_col && head [min_score] == COLAMD_EMPTY)
{
min_score++ ;
}
pivot_col = head [min_score] ;
COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
next_col = Col [pivot_col].shared4.degree_next ;
head [min_score] = next_col ;
if (next_col != COLAMD_EMPTY)
{
Col [next_col].shared3.prev = COLAMD_EMPTY ;
}
COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;
COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ;
/* remember score for defrag check */
pivot_col_score = Col [pivot_col].shared2.score ;
/* the pivot column is the kth column in the pivot order */
Col [pivot_col].shared2.order = k ;
/* increment order count by column thickness */
pivot_col_thickness = Col [pivot_col].shared1.thickness ;
k += pivot_col_thickness ;
COLAMD_ASSERT (pivot_col_thickness > 0) ;
/* === Garbage_collection, if necessary ============================= */
needed_memory = numext::mini(pivot_col_score, n_col - k) ;
if (pfree + needed_memory >= Alen)
{
pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
ngarbage++ ;
/* after garbage collection we will have enough */
COLAMD_ASSERT (pfree + needed_memory < Alen) ;
/* garbage collection has wiped out the Row[].shared2.mark array */
tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
}
/* === Compute pivot row pattern ==================================== */
/* get starting location for this new merged row */
pivot_row_start = pfree ;
/* initialize new row counts to zero */
pivot_row_degree = 0 ;
/* tag pivot column as having been visited so it isn't included */
/* in merged pivot row */
Col [pivot_col].shared1.thickness = -pivot_col_thickness ;
/* pivot row is the union of all rows in the pivot column pattern */
cp = &A [Col [pivot_col].start] ;
cp_end = cp + Col [pivot_col].length ;
while (cp < cp_end)
{
/* get a row */
row = *cp++ ;
COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
/* skip if row is dead */
if (ROW_IS_DEAD (row))
{
continue ;
}
rp = &A [Row [row].start] ;
rp_end = rp + Row [row].length ;
while (rp < rp_end)
{
/* get a column */
col = *rp++ ;
/* add the column, if alive and untagged */
col_thickness = Col [col].shared1.thickness ;
if (col_thickness > 0 && COL_IS_ALIVE (col))
{
/* tag column in pivot row */
Col [col].shared1.thickness = -col_thickness ;
COLAMD_ASSERT (pfree < Alen) ;
/* place column in pivot row */
A [pfree++] = col ;
pivot_row_degree += col_thickness ;
}
}
}
/* clear tag on pivot column */
Col [pivot_col].shared1.thickness = pivot_col_thickness ;
max_deg = numext::maxi(max_deg, pivot_row_degree) ;
/* === Kill all rows used to construct pivot row ==================== */
/* also kill pivot row, temporarily */
cp = &A [Col [pivot_col].start] ;
cp_end = cp + Col [pivot_col].length ;
while (cp < cp_end)
{
/* may be killing an already dead row */
row = *cp++ ;
COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
KILL_ROW (row) ;
}
/* === Select a row index to use as the new pivot row =============== */
pivot_row_length = pfree - pivot_row_start ;
if (pivot_row_length > 0)
{
/* pick the "pivot" row arbitrarily (first row in col) */
pivot_row = A [Col [pivot_col].start] ;
COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ;
}
else
{
/* there is no pivot row, since it is of zero length */
pivot_row = COLAMD_EMPTY ;
COLAMD_ASSERT (pivot_row_length == 0) ;
}
COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
/* === Approximate degree computation =============================== */
/* Here begins the computation of the approximate degree. The column */
/* score is the sum of the pivot row "length", plus the size of the */
/* set differences of each row in the column minus the pattern of the */
/* pivot row itself. The column ("thickness") itself is also */
/* excluded from the column score (we thus use an approximate */
/* external degree). */
/* The time taken by the following code (compute set differences, and */
/* add them up) is proportional to the size of the data structure */
/* being scanned - that is, the sum of the sizes of each column in */
/* the pivot row. Thus, the amortized time to compute a column score */
/* is proportional to the size of that column (where size, in this */
/* context, is the column "length", or the number of row indices */
/* in that column). The number of row indices in a column is */
/* monotonically non-decreasing, from the length of the original */
/* column on input to colamd. */
/* === Compute set differences ====================================== */
COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ;
/* pivot row is currently dead - it will be revived later. */
COLAMD_DEBUG3 (("Pivot row: ")) ;
/* for each column in pivot row */
rp = &A [pivot_row_start] ;
rp_end = rp + pivot_row_length ;
while (rp < rp_end)
{
col = *rp++ ;
COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
COLAMD_DEBUG3 (("Col: %d\n", col)) ;
/* clear tags used to construct pivot row pattern */
col_thickness = -Col [col].shared1.thickness ;
COLAMD_ASSERT (col_thickness > 0) ;
Col [col].shared1.thickness = col_thickness ;
/* === Remove column from degree list =========================== */
cur_score = Col [col].shared2.score ;
prev_col = Col [col].shared3.prev ;
next_col = Col [col].shared4.degree_next ;
COLAMD_ASSERT (cur_score >= 0) ;
COLAMD_ASSERT (cur_score <= n_col) ;
COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;
if (prev_col == COLAMD_EMPTY)
{
head [cur_score] = next_col ;
}
else
{
Col [prev_col].shared4.degree_next = next_col ;
}
if (next_col != COLAMD_EMPTY)
{
Col [next_col].shared3.prev = prev_col ;
}
/* === Scan the column ========================================== */
cp = &A [Col [col].start] ;
cp_end = cp + Col [col].length ;
while (cp < cp_end)
{
/* get a row */
row = *cp++ ;
row_mark = Row [row].shared2.mark ;
/* skip if dead */
if (ROW_IS_MARKED_DEAD (row_mark))
{
continue ;
}
COLAMD_ASSERT (row != pivot_row) ;
set_difference = row_mark - tag_mark ;
/* check if the row has been seen yet */
if (set_difference < 0)
{
COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ;
set_difference = Row [row].shared1.degree ;
}
/* subtract column thickness from this row's set difference */
set_difference -= col_thickness ;
COLAMD_ASSERT (set_difference >= 0) ;
/* absorb this row if the set difference becomes zero */
if (set_difference == 0)
{
COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
KILL_ROW (row) ;
}
else
{
/* save the new mark */
Row [row].shared2.mark = set_difference + tag_mark ;
}
}
}
/* === Add up set differences for each column ======================= */
COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ;
/* for each column in pivot row */
rp = &A [pivot_row_start] ;
rp_end = rp + pivot_row_length ;
while (rp < rp_end)
{
/* get a column */
col = *rp++ ;
COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
hash = 0 ;
cur_score = 0 ;
cp = &A [Col [col].start] ;
/* compact the column */
new_cp = cp ;
cp_end = cp + Col [col].length ;
COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ;
while (cp < cp_end)
{
/* get a row */
row = *cp++ ;
COLAMD_ASSERT(row >= 0 && row < n_row) ;
row_mark = Row [row].shared2.mark ;
/* skip if dead */
if (ROW_IS_MARKED_DEAD (row_mark))
{
continue ;
}
COLAMD_ASSERT (row_mark > tag_mark) ;
/* compact the column */
*new_cp++ = row ;
/* compute hash function */
hash += row ;
/* add set difference */
cur_score += row_mark - tag_mark ;
/* integer overflow... */
cur_score = numext::mini(cur_score, n_col) ;
}
/* recompute the column's length */
Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ;
/* === Further mass elimination ================================= */
if (Col [col].length == 0)
{
COLAMD_DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
/* nothing left but the pivot row in this column */
KILL_PRINCIPAL_COL (col) ;
pivot_row_degree -= Col [col].shared1.thickness ;
COLAMD_ASSERT (pivot_row_degree >= 0) ;
/* order it */
Col [col].shared2.order = k ;
/* increment order count by column thickness */
k += Col [col].shared1.thickness ;
}
else
{
/* === Prepare for supercolumn detection ==================== */
COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ;
/* save score so far */
Col [col].shared2.score = cur_score ;
/* add column to hash table, for supercolumn detection */
hash %= n_col + 1 ;
COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ;
COLAMD_ASSERT (hash <= n_col) ;
head_column = head [hash] ;
if (head_column > COLAMD_EMPTY)
{
/* degree list "hash" is non-empty, use prev (shared3) of */
/* first column in degree list as head of hash bucket */
first_col = Col [head_column].shared3.headhash ;
Col [head_column].shared3.headhash = col ;
}
else
{
/* degree list "hash" is empty, use head as hash bucket */
first_col = - (head_column + 2) ;
head [hash] = - (col + 2) ;
}
Col [col].shared4.hash_next = first_col ;
/* save hash function in Col [col].shared3.hash */
Col [col].shared3.hash = (IndexType) hash ;
COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
}
}
/* The approximate external column degree is now computed. */
/* === Supercolumn detection ======================================== */
COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ;
Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
/* === Kill the pivotal column ====================================== */
KILL_PRINCIPAL_COL (pivot_col) ;
/* === Clear mark =================================================== */
tag_mark += (max_deg + 1) ;
if (tag_mark >= max_mark)
{
COLAMD_DEBUG2 (("clearing tag_mark\n")) ;
tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
}
/* === Finalize the new pivot row, and column scores ================ */
COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ;
/* for each column in pivot row */
rp = &A [pivot_row_start] ;
/* compact the pivot row */
new_rp = rp ;
rp_end = rp + pivot_row_length ;
while (rp < rp_end)
{
col = *rp++ ;
/* skip dead columns */
if (COL_IS_DEAD (col))
{
continue ;
}
*new_rp++ = col ;
/* add new pivot row to column */
A [Col [col].start + (Col [col].length++)] = pivot_row ;
/* retrieve score so far and add on pivot row's degree. */
/* (we wait until here for this in case the pivot */
/* row's degree was reduced due to mass elimination). */
cur_score = Col [col].shared2.score + pivot_row_degree ;
/* calculate the max possible score as the number of */
/* external columns minus the 'k' value minus the */
/* columns thickness */
max_score = n_col - k - Col [col].shared1.thickness ;
/* make the score the external degree of the union-of-rows */
cur_score -= Col [col].shared1.thickness ;
/* make sure score is less or equal than the max score */
cur_score = numext::mini(cur_score, max_score) ;
COLAMD_ASSERT (cur_score >= 0) ;
/* store updated score */
Col [col].shared2.score = cur_score ;
/* === Place column back in degree list ========================= */
COLAMD_ASSERT (min_score >= 0) ;
COLAMD_ASSERT (min_score <= n_col) ;
COLAMD_ASSERT (cur_score >= 0) ;
COLAMD_ASSERT (cur_score <= n_col) ;
COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;
next_col = head [cur_score] ;
Col [col].shared4.degree_next = next_col ;
Col [col].shared3.prev = COLAMD_EMPTY ;
if (next_col != COLAMD_EMPTY)
{
Col [next_col].shared3.prev = col ;
}
head [cur_score] = col ;
/* see if this score is less than current min */
min_score = numext::mini(min_score, cur_score) ;
}
/* === Resurrect the new pivot row ================================== */
if (pivot_row_degree > 0)
{
/* update pivot row length to reflect any cols that were killed */
/* during super-col detection and mass elimination */
Row [pivot_row].start = pivot_row_start ;
Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ;
Row [pivot_row].shared1.degree = pivot_row_degree ;
Row [pivot_row].shared2.mark = 0 ;
/* pivot row is no longer dead */
}
}
/* === All principal columns have now been ordered ====================== */
return (ngarbage) ;
}
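/* --- Illustrative sketch (not part of the library) ----------------------- */
/* The tag_mark trick used in find_ordering, in isolation. A row's
   shared2.mark encodes "tag_mark + remaining set difference"; any mark
   below the current tag_mark means the row has not yet been seen in this
   pivot step:

     set_difference = Row [row].shared2.mark - tag_mark ;
     if (set_difference < 0)                        // first visit this step
       set_difference = Row [row].shared1.degree ;
     set_difference -= col_thickness ;              // remove this supercol
     Row [row].shared2.mark = set_difference + tag_mark ;

   Advancing tag_mark by max_deg + 1 after each pivot invalidates every
   row mark at once, with no pass over the Row array until clear_mark is
   eventually required. */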
/* ========================================================================== */
/* === order_children ======================================================= */
/* ========================================================================== */
/*
The find_ordering routine has ordered all of the principal columns (the
representatives of the supercolumns). The non-principal columns have not
yet been ordered. This routine orders those columns by walking up the
parent tree (a column is a child of the column which absorbed it). The
final permutation vector is then placed in p [0 ... n_col-1], with p [0]
  being the first column, and p [n_col-1] being the last. Although not
  immediately obvious at first glance, this routine takes O (n_col) time,
  that is, time linear in the number of columns. Not user-callable.
*/
template <typename IndexType>
static inline void order_children
(
/* === Parameters ======================================================= */
IndexType n_col, /* number of columns of A */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType p [] /* p [0 ... n_col-1] is the column permutation*/
)
{
/* === Local variables ================================================== */
IndexType i ; /* loop counter for all columns */
IndexType c ; /* column index */
IndexType parent ; /* index of column's parent */
IndexType order ; /* column's order */
/* === Order each non-principal column ================================== */
for (i = 0 ; i < n_col ; i++)
{
/* find an un-ordered non-principal column */
COLAMD_ASSERT (COL_IS_DEAD (i)) ;
if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
{
parent = i ;
/* once found, find its principal parent */
do
{
parent = Col [parent].shared1.parent ;
} while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
/* now, order all un-ordered non-principal columns along path */
/* to this parent. collapse tree at the same time */
c = i ;
/* get order of parent */
order = Col [parent].shared2.order ;
do
{
COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
/* order this column */
Col [c].shared2.order = order++ ;
	/* collapse the tree */
Col [c].shared1.parent = parent ;
/* get immediate parent of this column */
c = Col [c].shared1.parent ;
	/* continue until we hit an ordered column. There are */
	/* guaranteed to be no more unordered columns */
	/* above an ordered column */
} while (Col [c].shared2.order == COLAMD_EMPTY) ;
/* re-order the super_col parent to largest order for this group */
Col [parent].shared2.order = order ;
}
}
/* === Generate the permutation ========================================= */
for (c = 0 ; c < n_col ; c++)
{
p [Col [c].shared2.order] = c ;
}
}
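/* --- Illustrative sketch (not part of the library) ----------------------- */
/* The final loop of order_children inverts the ordering: shared2.order
   maps column -> rank, while p maps rank -> column. For example, with

     Col [0].shared2.order = 2, Col [1].shared2.order = 0,
     Col [2].shared2.order = 1

   the generated permutation is p = { 1, 2, 0 }. */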
/* ========================================================================== */
/* === detect_super_cols ==================================================== */
/* ========================================================================== */
/*
Detects supercolumns by finding matches between columns in the hash buckets.
  Checks amongst columns in the set A [row_start ... row_start + row_length-1].
The columns under consideration are currently *not* in the degree lists,
and have already been placed in the hash buckets.
The hash bucket for columns whose hash function is equal to h is stored
as follows:
if head [h] is >= 0, then head [h] contains a degree list, so:
head [h] is the first column in degree bucket h.
Col [head [h]].headhash gives the first column in hash bucket h.
otherwise, the degree list is empty, and:
-(head [h] + 2) is the first column in hash bucket h.
For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
column" pointer. Col [c].shared3.hash is used instead as the hash number
for that column. The value of Col [c].shared4.hash_next is the next column
in the same hash bucket.
Assuming no, or "few" hash collisions, the time taken by this routine is
linear in the sum of the sizes (lengths) of each column whose score has
just been computed in the approximate degree computation.
Not user-callable.
*/
template <typename IndexType>
static void detect_super_cols
(
/* === Parameters ======================================================= */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A */
IndexType head [], /* head of degree lists and hash buckets */
IndexType row_start, /* pointer to set of columns to check */
IndexType row_length /* number of columns to check */
)
{
/* === Local variables ================================================== */
IndexType hash ; /* hash value for a column */
IndexType *rp ; /* pointer to a row */
IndexType c ; /* a column index */
IndexType super_c ; /* column index of the column to absorb into */
IndexType *cp1 ; /* column pointer for column super_c */
IndexType *cp2 ; /* column pointer for column c */
IndexType length ; /* length of column super_c */
IndexType prev_c ; /* column preceding c in hash bucket */
IndexType i ; /* loop counter */
IndexType *rp_end ; /* pointer to the end of the row */
IndexType col ; /* a column index in the row to check */
IndexType head_column ; /* first column in hash bucket or degree list */
IndexType first_col ; /* first column in hash bucket */
/* === Consider each column in the row ================================== */
rp = &A [row_start] ;
rp_end = rp + row_length ;
while (rp < rp_end)
{
col = *rp++ ;
if (COL_IS_DEAD (col))
{
continue ;
}
/* get hash number for this column */
hash = Col [col].shared3.hash ;
COLAMD_ASSERT (hash <= n_col) ;
/* === Get the first column in this hash bucket ===================== */
head_column = head [hash] ;
if (head_column > COLAMD_EMPTY)
{
first_col = Col [head_column].shared3.headhash ;
}
else
{
first_col = - (head_column + 2) ;
}
/* === Consider each column in the hash bucket ====================== */
for (super_c = first_col ; super_c != COLAMD_EMPTY ;
super_c = Col [super_c].shared4.hash_next)
{
COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
length = Col [super_c].length ;
/* prev_c is the column preceding column c in the hash bucket */
prev_c = super_c ;
/* === Compare super_c with all columns after it ================ */
for (c = Col [super_c].shared4.hash_next ;
c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
{
COLAMD_ASSERT (c != super_c) ;
COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
/* not identical if lengths or scores are different */
if (Col [c].length != length ||
Col [c].shared2.score != Col [super_c].shared2.score)
{
prev_c = c ;
continue ;
}
/* compare the two columns */
cp1 = &A [Col [super_c].start] ;
cp2 = &A [Col [c].start] ;
for (i = 0 ; i < length ; i++)
{
/* the columns are "clean" (no dead rows) */
COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
	  /* row indices will be in the same order for both supercols, */
	  /* so no gather/scatter is necessary */
if (*cp1++ != *cp2++)
{
break ;
}
}
/* the two columns are different if the for-loop "broke" */
if (i != length)
{
prev_c = c ;
continue ;
}
/* === Got it! two columns are identical =================== */
COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
Col [c].shared1.parent = super_c ;
KILL_NON_PRINCIPAL_COL (c) ;
/* order c later, in order_children() */
Col [c].shared2.order = COLAMD_EMPTY ;
/* remove c from hash bucket */
Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
}
}
/* === Empty this hash bucket ======================================= */
if (head_column > COLAMD_EMPTY)
{
/* corresponding degree list "hash" is not empty */
Col [head_column].shared3.headhash = COLAMD_EMPTY ;
}
else
{
/* corresponding degree list "hash" is empty */
head [hash] = COLAMD_EMPTY ;
}
}
}
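/* --- Illustrative sketch (not part of the library) ----------------------- */
/* The "-(col + 2)" encoding lets head [hash] double as a hash-bucket head
   when no degree list occupies that slot. With COLAMD_EMPTY == -1 (as
   defined earlier in this file):

     head [h] = - (5 + 2) ;              // store column 5, yields -7 < -1
     first_col = - (head [h] + 2) ;      // recovers 5
     head [h] = COLAMD_EMPTY ;           // -1 marks an empty bucket

   so the non-negative range stays reserved for degree-list heads. */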
/* ========================================================================== */
/* === garbage_collection =================================================== */
/* ========================================================================== */
/*
Defragments and compacts columns and rows in the workspace A. Used when
  all available memory has been used while performing row merging. Returns
  the index of the first free position in A, after garbage collection. The
  time taken by this routine is linear in the size of the array A, which is
  itself linear in the number of nonzeros in the input matrix.
Not user-callable.
*/
template <typename IndexType>
static IndexType garbage_collection /* returns the new value of pfree */
(
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows */
IndexType n_col, /* number of columns */
Colamd_Row<IndexType> Row [], /* row info */
colamd_col<IndexType> Col [], /* column info */
IndexType A [], /* A [0 ... Alen-1] holds the matrix */
IndexType *pfree /* &A [0] ... pfree is in use */
)
{
/* === Local variables ================================================== */
IndexType *psrc ; /* source pointer */
IndexType *pdest ; /* destination pointer */
IndexType j ; /* counter */
IndexType r ; /* a row index */
IndexType c ; /* a column index */
IndexType length ; /* length of a row or column */
/* === Defragment the columns =========================================== */
pdest = &A[0] ;
for (c = 0 ; c < n_col ; c++)
{
if (COL_IS_ALIVE (c))
{
psrc = &A [Col [c].start] ;
/* move and compact the column */
COLAMD_ASSERT (pdest <= psrc) ;
Col [c].start = (IndexType) (pdest - &A [0]) ;
length = Col [c].length ;
for (j = 0 ; j < length ; j++)
{
r = *psrc++ ;
if (ROW_IS_ALIVE (r))
{
*pdest++ = r ;
}
}
Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;
}
}
/* === Prepare to defragment the rows =================================== */
for (r = 0 ; r < n_row ; r++)
{
if (ROW_IS_ALIVE (r))
{
if (Row [r].length == 0)
{
/* this row is of zero length. cannot compact it, so kill it */
COLAMD_DEBUG3 (("Defrag row kill\n")) ;
KILL_ROW (r) ;
}
else
{
/* save first column index in Row [r].shared2.first_column */
psrc = &A [Row [r].start] ;
Row [r].shared2.first_column = *psrc ;
COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
/* flag the start of the row with the one's complement of row */
*psrc = ONES_COMPLEMENT (r) ;
}
}
}
/* === Defragment the rows ============================================== */
psrc = pdest ;
while (psrc < pfree)
{
/* find a negative number ... the start of a row */
if (*psrc++ < 0)
{
psrc-- ;
/* get the row index */
r = ONES_COMPLEMENT (*psrc) ;
COLAMD_ASSERT (r >= 0 && r < n_row) ;
/* restore first column index */
*psrc = Row [r].shared2.first_column ;
COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
/* move and compact the row */
COLAMD_ASSERT (pdest <= psrc) ;
Row [r].start = (IndexType) (pdest - &A [0]) ;
length = Row [r].length ;
for (j = 0 ; j < length ; j++)
{
c = *psrc++ ;
if (COL_IS_ALIVE (c))
{
*pdest++ = c ;
}
}
Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ;
}
}
/* ensure we found all the rows */
COLAMD_ASSERT (debug_rows == 0) ;
/* === Return the new value of pfree ==================================== */
return ((IndexType) (pdest - &A [0])) ;
}
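/* --- Illustrative sketch (not part of the library) ----------------------- */
/* Row starts are flagged in place with the one's complement, which is
   always negative for a non-negative row index (ONES_COMPLEMENT (r) is
   defined as -(r)-1 earlier in this file):

     *psrc = ONES_COMPLEMENT (3) ;       // stores -4, marks a row start
     r = ONES_COMPLEMENT (*psrc) ;       // recovers 3

   The displaced first column index is parked in shared2.first_column so
   nothing is lost while the flag occupies its slot. */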
/* ========================================================================== */
/* === clear_mark =========================================================== */
/* ========================================================================== */
/*
  Clears the Row [].shared2.mark array, and returns the new tag_mark.
  Not user-callable.
*/
template <typename IndexType>
static inline IndexType clear_mark /* return the new value for tag_mark */
(
/* === Parameters ======================================================= */
IndexType n_row, /* number of rows in A */
Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
)
{
/* === Local variables ================================================== */
IndexType r ;
for (r = 0 ; r < n_row ; r++)
{
if (ROW_IS_ALIVE (r))
{
Row [r].shared2.mark = 0 ;
}
}
return (1) ;
}
} // namespace internal
#endif
| 62,266 | 32.767354 | 247 |
h
|
abess
|
abess-master/python/include/Eigen/src/OrderingMethods/Ordering.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ORDERING_H
#define EIGEN_ORDERING_H
namespace Eigen {
#include "Eigen_Colamd.h"
namespace internal {
/** \internal
* \ingroup OrderingMethods_Module
* \param[in] A the input non-symmetric matrix
* \param[out] symmat the symmetric pattern A^T+A from the input matrix \a A.
* FIXME: The values should not be considered here
*/
template<typename MatrixType>
void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat)
{
MatrixType C;
C = A.transpose(); // NOTE: Could be costly
for (int i = 0; i < C.rows(); i++)
{
for (typename MatrixType::InnerIterator it(C, i); it; ++it)
it.valueRef() = 0.0;
}
symmat = C + A;
}
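/* Illustrative sketch (not part of the library): only the pattern of
   A^T + A matters for the orderings below, which is why the values of the
   transposed copy are zeroed before the sum. Kept in a comment so the
   header still compiles:

     Eigen::SparseMatrix<double> A(2,2), S;
     A.insert(0,0) = 1.0; A.insert(1,0) = -1.0; A.insert(1,1) = 1.0;
     A.makeCompressed();
     Eigen::internal::ordering_helper_at_plus_a(A, S);
     // S now carries the symmetric pattern {(0,0),(0,1),(1,0),(1,1)}
*/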
}
#ifndef EIGEN_MPL2_ONLY
/** \ingroup OrderingMethods_Module
* \class AMDOrdering
*
 * Functor computing the \em approximate \em minimum \em degree ordering.
 * If the matrix is not structurally symmetric, an ordering of A^T+A is computed.
* \tparam StorageIndex The type of indices of the matrix
* \sa COLAMDOrdering
*/
template <typename StorageIndex>
class AMDOrdering
{
public:
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
/** Compute the permutation vector from a sparse matrix
* This routine is much faster if the input matrix is column-major
*/
template <typename MatrixType>
void operator()(const MatrixType& mat, PermutationType& perm)
{
// Compute the symmetric pattern
SparseMatrix<typename MatrixType::Scalar, ColMajor, StorageIndex> symm;
internal::ordering_helper_at_plus_a(mat,symm);
// Call the AMD routine
//m_mat.prune(keep_diag());
internal::minimum_degree_ordering(symm, perm);
}
/** Compute the permutation with a selfadjoint matrix */
template <typename SrcType, unsigned int SrcUpLo>
void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)
{
SparseMatrix<typename SrcType::Scalar, ColMajor, StorageIndex> C; C = mat;
// Call the AMD routine
// m_mat.prune(keep_diag()); //Remove the diagonal elements
internal::minimum_degree_ordering(C, perm);
}
};
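/* Illustrative usage sketch (not part of the library), assuming a square
   column-major SparseMatrix<double> A; kept in a comment so the header
   remains self-contained:

     Eigen::AMDOrdering<int> ordering;
     Eigen::AMDOrdering<int>::PermutationType perm;
     ordering(A, perm);           // orders the pattern of A^T + A
*/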
#endif // EIGEN_MPL2_ONLY
/** \ingroup OrderingMethods_Module
* \class NaturalOrdering
*
* Functor computing the natural ordering (identity)
*
* \note Returns an empty permutation matrix
* \tparam StorageIndex The type of indices of the matrix
*/
template <typename StorageIndex>
class NaturalOrdering
{
public:
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
/** Compute the permutation vector from a column-major sparse matrix */
template <typename MatrixType>
void operator()(const MatrixType& /*mat*/, PermutationType& perm)
{
perm.resize(0);
}
};
/** \ingroup OrderingMethods_Module
* \class COLAMDOrdering
*
* \tparam StorageIndex The type of indices of the matrix
*
  * Functor computing the \em column \em approximate \em minimum \em degree ordering.
  * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
*/
template<typename StorageIndex>
class COLAMDOrdering
{
public:
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
    /** Compute the permutation vector \a perm from the sparse matrix \a mat
* \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*/
template <typename MatrixType>
void operator() (const MatrixType& mat, PermutationType& perm)
{
eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = StorageIndex(mat.cols());
StorageIndex nnz = StorageIndex(mat.nonZeros());
// Get the recommended value of Alen to be used by colamd
StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
// Set the default parameters
double knobs [COLAMD_KNOBS];
StorageIndex stats [COLAMD_STATS];
internal::colamd_set_defaults(knobs);
IndexVector p(n+1), A(Alen);
for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
// Call Colamd routine to compute the ordering
StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
EIGEN_UNUSED_VARIABLE(info);
eigen_assert( info && "COLAMD failed " );
perm.resize(n);
for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i;
}
};
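/* Illustrative usage sketch (not part of the library): COLAMDOrdering is
   most often consumed as the ordering policy of a sparse solver. Assuming
   a SparseMatrix<double> A and VectorXd b; kept in a comment so the header
   remains self-contained:

     Eigen::SparseLU<Eigen::SparseMatrix<double>,
                     Eigen::COLAMDOrdering<int> > solver;
     A.makeCompressed();          // required, see the assert above
     solver.compute(A);
     Eigen::VectorXd x = solver.solve(b);
*/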
} // end namespace Eigen
#endif
| 5,227 | 32.088608 | 163 |
h
|
abess
|
abess-master/python/include/Eigen/src/PaStiXSupport/PaStiXSupport.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PASTIXSUPPORT_H
#define EIGEN_PASTIXSUPPORT_H
namespace Eigen {
#if defined(DCOMPLEX)
#define PASTIX_COMPLEX COMPLEX
#define PASTIX_DCOMPLEX DCOMPLEX
#else
#define PASTIX_COMPLEX std::complex<float>
#define PASTIX_DCOMPLEX std::complex<double>
#endif
/** \ingroup PaStiXSupport_Module
 * \brief Interface to the PaStiX solver
 *
 * This class is used to solve the linear systems A.X = B via the PaStiX library.
* The matrix can be either real or complex, symmetric or not.
*
* \sa TutorialSparseDirectSolvers
*/
template<typename _MatrixType, bool IsStrSym = false> class PastixLU;
template<typename _MatrixType, int Options> class PastixLLT;
template<typename _MatrixType, int Options> class PastixLDLT;
namespace internal
{
template<class Pastix> struct pastix_traits;
template<typename _MatrixType>
struct pastix_traits< PastixLU<_MatrixType> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pastix_traits< PastixLLT<_MatrixType,Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pastix_traits< PastixLDLT<_MatrixType,Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm);
}
// Convert the matrix to Fortran-style Numbering
template <typename MatrixType>
void c_to_fortran_numbering (MatrixType& mat)
{
if ( !(mat.outerIndexPtr()[0]) )
{
int i;
for(i = 0; i <= mat.rows(); ++i)
++mat.outerIndexPtr()[i];
for(i = 0; i < mat.nonZeros(); ++i)
++mat.innerIndexPtr()[i];
}
}
// Convert to C-style Numbering
template <typename MatrixType>
void fortran_to_c_numbering (MatrixType& mat)
{
// Check the Numbering
if ( mat.outerIndexPtr()[0] == 1 )
{ // Convert to C-style numbering
int i;
for(i = 0; i <= mat.rows(); ++i)
--mat.outerIndexPtr()[i];
for(i = 0; i < mat.nonZeros(); ++i)
--mat.innerIndexPtr()[i];
}
}
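// Illustrative sketch (not part of the library): the two helpers above are
// exact inverses acting only on the index arrays. For a 3x3 matrix with
// 5 nonzeros, e.g.:
//
//   C style (0-based):  outer = {0,2,3,5}  inner = {0,1,2,0,2}
//   Fortran (1-based):  outer = {1,3,4,6}  inner = {1,2,3,1,3}
//
// The first outer entry (0 vs 1) doubles as the detection flag in both.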
}
// This is the base class to interface with PaStiX functions.
// Users should not use this class directly.
template <class Derived>
class PastixBase : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::derived;
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0)
{
init();
}
~PastixBase()
{
clean();
}
template<typename Rhs,typename Dest>
bool _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
/** Returns a reference to the integer vector IPARM of PaStiX parameters
* to modify the default parameters.
* The statistics related to the different phases of factorization and solve are saved here as well
* \sa analyzePattern() factorize()
*/
Array<StorageIndex,IPARM_SIZE,1>& iparm()
{
return m_iparm;
}
/** Return a reference to a particular index parameter of the IPARM vector
* \sa iparm()
*/
int& iparm(int idxparam)
{
return m_iparm(idxparam);
}
/** Returns a reference to the double vector DPARM of PaStiX parameters
* The statistics related to the different phases of factorization and solve are saved here as well
* \sa analyzePattern() factorize()
*/
Array<double,DPARM_SIZE,1>& dparm()
{
return m_dparm;
}
/** Return a reference to a particular index parameter of the DPARM vector
* \sa dparm()
*/
double& dparm(int idxparam)
{
return m_dparm(idxparam);
}
inline Index cols() const { return m_size; }
inline Index rows() const { return m_size; }
/** \brief Reports whether previous computation was successful.
*
    * \returns \c Success if the computation was successful,
    * \c NumericalIssue if PaStiX reports a problem,
    * \c InvalidInput if the input matrix is invalid.
*
* \sa iparm()
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
protected:
// Initialize the Pastix data structure, check the matrix
void init();
// Compute the ordering and the symbolic factorization
void analyzePattern(ColSpMatrix& mat);
// Compute the numerical factorization
void factorize(ColSpMatrix& mat);
// Free all the data allocated by Pastix
void clean()
{
eigen_assert(m_initisOk && "The Pastix structure should be allocated first");
m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;
m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
}
void compute(ColSpMatrix& mat);
int m_initisOk;
int m_analysisIsOk;
int m_factorizationIsOk;
mutable ComputationInfo m_info;
mutable pastix_data_t *m_pastixdata; // Data structure for pastix
mutable int m_comm; // The MPI communicator identifier
mutable Array<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
mutable Array<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
mutable Matrix<StorageIndex,Dynamic,1> m_perm; // Permutation vector
mutable Matrix<StorageIndex,Dynamic,1> m_invp; // Inverse permutation vector
mutable int m_size; // Size of the matrix
};
/** Initialize the PaStiX data structure.
 * A first call to this function fills iparm and dparm with the default PaStiX parameters.
* \sa iparm() dparm()
*/
template <class Derived>
void PastixBase<Derived>::init()
{
m_size = 0;
m_iparm.setZero(IPARM_SIZE);
m_dparm.setZero(DPARM_SIZE);
m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;
pastix(&m_pastixdata, MPI_COMM_WORLD,
0, 0, 0, 0,
0, 0, 0, 1, m_iparm.data(), m_dparm.data());
m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO;
m_iparm[IPARM_VERBOSE] = API_VERBOSE_NOT;
m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH;
m_iparm[IPARM_INCOMPLETE] = API_NO;
m_iparm[IPARM_OOC_LIMIT] = 2000;
m_iparm[IPARM_RHS_MAKING] = API_RHS_B;
m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
m_iparm(IPARM_START_TASK) = API_TASK_INIT;
m_iparm(IPARM_END_TASK) = API_TASK_INIT;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
0, 0, 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER)) {
m_info = InvalidInput;
m_initisOk = false;
}
else {
m_info = Success;
m_initisOk = true;
}
}
template <class Derived>
void PastixBase<Derived>::compute(ColSpMatrix& mat)
{
  eigen_assert(mat.rows() == mat.cols() && "The input matrix should be square");
analyzePattern(mat);
factorize(mat);
m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
}
template <class Derived>
void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
{
  eigen_assert(m_initisOk && "The initialization of PaStiX failed");
// clean previous calls
if(m_size>0)
clean();
m_size = internal::convert_index<int>(mat.rows());
m_perm.resize(m_size);
m_invp.resize(m_size);
m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;
m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER))
{
m_info = NumericalIssue;
m_analysisIsOk = false;
}
else
{
m_info = Success;
m_analysisIsOk = true;
}
}
template <class Derived>
void PastixBase<Derived>::factorize(ColSpMatrix& mat)
{
// if(&m_cpyMat != &mat) m_cpyMat = mat;
eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
m_size = internal::convert_index<int>(mat.rows());
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER))
{
m_info = NumericalIssue;
m_factorizationIsOk = false;
m_isInitialized = false;
}
else
{
m_info = Success;
m_factorizationIsOk = true;
m_isInitialized = true;
}
}
/* Solve the system */
template<typename Base>
template<typename Rhs,typename Dest>
bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const
{
eigen_assert(m_isInitialized && "The matrix should be factorized first");
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
int rhs = 1;
x = b; /* on return, x is overwritten by the computed solution */
for (int i = 0; i < b.cols(); i++){
m_iparm[IPARM_START_TASK] = API_TASK_SOLVE;
m_iparm[IPARM_END_TASK] = API_TASK_REFINE;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
}
// Check the returned error
m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue;
return m_iparm(IPARM_ERROR_NUMBER)==0;
}
/** \ingroup PaStiXSupport_Module
* \class PastixLU
* \brief Sparse direct LU solver based on PaStiX library
*
* This class is used to solve the linear systems A.X = B with a supernodal LU
 * factorization in the PaStiX library. The matrix A should be square and nonsingular.
 * PaStiX requires that the matrix A has a symmetric structural pattern;
 * this interface can symmetrize the input matrix otherwise.
* The vectors or matrices X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false
 * NOTE: If the analysis and factorization phases are called separately,
 * the input matrix will be symmetrized at each call, hence it is advised to
 * symmetrize the matrix in the end-user program and set \p IsStrSym to true.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SparseLU
*
*/
template<typename _MatrixType, bool IsStrSym>
class PastixLU : public PastixBase< PastixLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLU<MatrixType> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
typedef typename MatrixType::StorageIndex StorageIndex;
public:
PastixLU() : Base()
{
init();
}
explicit PastixLU(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the LU supernodal factorization of \p matrix.
* iparm and dparm can be used to tune the PaStiX parameters.
      * See the PaStiX user's manual.
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
m_structureIsUptodate = false;
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LU symbolic factorization of \p matrix using its sparsity pattern.
* Several ordering methods can be used at this step. See the PaStiX user's manual.
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
m_structureIsUptodate = false;
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LU supernodal factorization of \p matrix
      * WARNING: The matrix \p matrix should have the same structural pattern
      * as the one used in the analysis phase.
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
void init()
{
m_structureIsUptodate = false;
m_iparm(IPARM_SYM) = API_SYM_NO;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
if(IsStrSym)
out = matrix;
else
{
if(!m_structureIsUptodate)
{
// update the transposed structure
m_transposedStructure = matrix.transpose();
// Set the elements of the matrix to zero
for (Index j=0; j<m_transposedStructure.outerSize(); ++j)
for(typename ColSpMatrix::InnerIterator it(m_transposedStructure, j); it; ++it)
it.valueRef() = 0.0;
m_structureIsUptodate = true;
}
out = m_transposedStructure + matrix;
}
internal::c_to_fortran_numbering(out);
}
using Base::m_iparm;
using Base::m_dparm;
ColSpMatrix m_transposedStructure;
bool m_structureIsUptodate;
};
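// Illustrative usage sketch (not part of the library), assuming Eigen was
// built and linked against PaStiX, with a SparseMatrix<double> A and
// VectorXd b, x:
//
//   Eigen::PastixLU<Eigen::SparseMatrix<double> > solver;
//   solver.compute(A);           // analyzePattern() + factorize()
//   if (solver.info() == Eigen::Success)
//     x = solver.solve(b);
//
// With IsStrSym left at false, each compute() symmetrizes the pattern as
// grabMatrix() above shows.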
/** \ingroup PaStiXSupport_Module
* \class PastixLLT
* \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library
*
  * This class is used to solve the linear systems A.X = B via an LL^T supernodal Cholesky factorization
  * available in the PaStiX library. The matrix A should be symmetric and positive definite.
  * WARNING: Selfadjoint complex matrices are not supported in the current version of PaStiX.
  * The vectors or matrices X and B can be either dense or sparse.
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLLT
*/
template<typename _MatrixType, int _UpLo>
class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
public:
enum { UpLo = _UpLo };
PastixLLT() : Base()
{
init();
}
explicit PastixLLT(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the L factor of the LL^T supernodal factorization of \p matrix
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LL^T supernodal numerical factorization of \p matrix
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
using Base::m_iparm;
void init()
{
m_iparm(IPARM_SYM) = API_SYM_YES;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
out.resize(matrix.rows(), matrix.cols());
// Pastix supports only lower, column-major matrices
out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
internal::c_to_fortran_numbering(out);
}
};
/** \ingroup PaStiXSupport_Module
* \class PastixLDLT
  * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
  *
  * This class is used to solve the linear systems A.X = B via an LDL^T supernodal Cholesky factorization
  * available in the PaStiX library. The matrix A should be symmetric and positive definite.
  * WARNING: Selfadjoint complex matrices are not supported in the current version of PaStiX.
  * The vectors or matrices X and B can be either dense or sparse.
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT
*/
template<typename _MatrixType, int _UpLo>
class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
public:
enum { UpLo = _UpLo };
PastixLDLT():Base()
{
init();
}
explicit PastixLDLT(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the L and D factors of the LDL^T factorization of \p matrix
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LDL^T supernodal numerical factorization of \p matrix
*
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
using Base::m_iparm;
void init()
{
m_iparm(IPARM_SYM) = API_SYM_YES;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
// Pastix supports only lower, column-major matrices
out.resize(matrix.rows(), matrix.cols());
out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
internal::c_to_fortran_numbering(out);
}
};
} // end namespace Eigen
#endif
| 22,218 | 31.723122 | 206 |
h
|
abess
|
abess-master/python/include/Eigen/src/PardisoSupport/PardisoSupport.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to Intel(R) MKL PARDISO
********************************************************************************
*/
#ifndef EIGEN_PARDISOSUPPORT_H
#define EIGEN_PARDISOSUPPORT_H
namespace Eigen {
template<typename _MatrixType> class PardisoLU;
template<typename _MatrixType, int Options=Upper> class PardisoLLT;
template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
namespace internal
{
template<typename IndexType>
struct pardiso_run_selector
{
static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
{
IndexType error = 0;
::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
return error;
}
};
template<>
struct pardiso_run_selector<long long int>
{
typedef long long int IndexType;
static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
{
IndexType error = 0;
::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
return error;
}
};
template<class Pardiso> struct pardiso_traits;
template<typename _MatrixType>
struct pardiso_traits< PardisoLU<_MatrixType> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pardiso_traits< PardisoLLT<_MatrixType, Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
} // end namespace internal
template<class Derived>
class PardisoImpl : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::derived;
using Base::m_isInitialized;
typedef internal::pardiso_traits<Derived> Traits;
public:
using Base::_solve_impl;
typedef typename Traits::MatrixType MatrixType;
typedef typename Traits::Scalar Scalar;
typedef typename Traits::RealScalar RealScalar;
typedef typename Traits::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,RowMajor,StorageIndex> SparseMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef Array<StorageIndex,64,1,DontAlign> ParameterType;
enum {
ScalarIsComplex = NumTraits<Scalar>::IsComplex,
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
PardisoImpl()
{
eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
m_iparm.setZero();
m_msglvl = 0; // No output
m_isInitialized = false;
}
~PardisoImpl()
{
pardisoRelease();
}
inline Index cols() const { return m_size; }
inline Index rows() const { return m_size; }
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
/** \warning for advanced usage only.
* \returns a reference to the parameter array controlling PARDISO.
      * See the PARDISO manual to know how to use it.
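      *
      * For example, enabling PARDISO's out-of-core mode (as also noted in the
      * solver class documentation below) amounts to:
      * \code
      * solver.pardisoParameterArray()[59] = 1;
      * \endcode
      */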
ParameterType& pardisoParameterArray()
{
return m_iparm;
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
* \sa factorize()
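      *
      * Typical pattern-reuse workflow (names are illustrative):
      * \code
      * solver.analyzePattern(A);   // symbolic factorization, done once
      * solver.factorize(A);        // numeric factorization
      * x = solver.solve(b);
      * solver.factorize(A2);       // same sparsity pattern, new values
      * x2 = solver.solve(b2);
      * \endcode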
*/
Derived& analyzePattern(const MatrixType& matrix);
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
Derived& factorize(const MatrixType& matrix);
Derived& compute(const MatrixType& matrix);
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
protected:
void pardisoRelease()
{
if(m_isInitialized) // Factorization ran at least once
{
internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, internal::convert_index<StorageIndex>(m_size),0, 0, 0, m_perm.data(), 0,
m_iparm.data(), m_msglvl, NULL, NULL);
m_isInitialized = false;
}
}
void pardisoInit(int type)
{
m_type = type;
bool symmetric = std::abs(m_type) < 10;
m_iparm[0] = 1; // No solver default
m_iparm[1] = 2; // use Metis for the ordering
m_iparm[2] = 0; // Reserved. Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??)
m_iparm[3] = 0; // No iterative-direct algorithm
m_iparm[4] = 0; // No user fill-in reducing permutation
m_iparm[5] = 0; // Write solution into x, b is left unchanged
m_iparm[6] = 0; // Not in use
m_iparm[7] = 2; // Max numbers of iterative refinement steps
m_iparm[8] = 0; // Not in use
m_iparm[9] = 13; // Perturb the pivot elements with 1E-13
m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
m_iparm[11] = 0; // Not in use
m_iparm[12] = symmetric ? 0 : 1; // Maximum weighted matching algorithm is switched-off (default for symmetric).
// Try m_iparm[12] = 1 in case of inappropriate accuracy
m_iparm[13] = 0; // Output: Number of perturbed pivots
m_iparm[14] = 0; // Not in use
m_iparm[15] = 0; // Not in use
m_iparm[16] = 0; // Not in use
m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
m_iparm[18] = -1; // Output: Mflops for LU factorization
m_iparm[19] = 0; // Output: Numbers of CG Iterations
m_iparm[20] = 0; // 1x1 pivoting
m_iparm[26] = 0; // No matrix checker
m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
m_iparm[34] = 1; // C indexing
m_iparm[36] = 0; // CSR
m_iparm[59] = 0; // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core
memset(m_pt, 0, sizeof(m_pt));
}
protected:
// cached data to reduce reallocation, etc.
void manageErrorCode(Index error) const
{
switch(error)
{
case 0:
m_info = Success;
break;
case -4:
case -7:
m_info = NumericalIssue;
break;
default:
m_info = InvalidInput;
}
}
mutable SparseMatrixType m_matrix;
mutable ComputationInfo m_info;
bool m_analysisIsOk, m_factorizationIsOk;
StorageIndex m_type, m_msglvl;
mutable void *m_pt[64];
mutable ParameterType m_iparm;
mutable IntColVectorType m_perm;
Index m_size;
};
template<class Derived>
Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
{
m_size = a.rows();
eigen_assert(a.rows() == a.cols());
pardisoRelease();
m_perm.setZero(m_size);
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 12, internal::convert_index<StorageIndex>(m_size),
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_analysisIsOk = true;
m_factorizationIsOk = true;
m_isInitialized = true;
return derived();
}
template<class Derived>
Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
{
m_size = a.rows();
eigen_assert(m_size == a.cols());
pardisoRelease();
m_perm.setZero(m_size);
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 11, internal::convert_index<StorageIndex>(m_size),
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = true;
return derived();
}
template<class Derived>
Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
eigen_assert(m_size == a.rows() && m_size == a.cols());
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 22, internal::convert_index<StorageIndex>(m_size),
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_factorizationIsOk = true;
return derived();
}
template<class Derived>
template<typename BDerived,typename XDerived>
void PardisoImpl<Derived>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const
{
if(m_iparm[0] == 0) // Factorization was not computed
{
m_info = InvalidInput;
return;
}
//Index n = m_matrix.rows();
Index nrhs = Index(b.cols());
eigen_assert(m_size==b.rows());
eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported");
eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported");
eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));
// switch (transposed) {
// case SvNoTrans : m_iparm[11] = 0 ; break;
// case SvTranspose : m_iparm[11] = 2 ; break;
// case SvAdjoint : m_iparm[11] = 1 ; break;
// default:
// //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the PARDISO backend\n";
// m_iparm[11] = 0;
// }
Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());
Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;
// Pardiso cannot solve in-place
if(rhs_ptr == x.derived().data())
{
tmp = b;
rhs_ptr = tmp.data();
}
Index error;
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 33, internal::convert_index<StorageIndex>(m_size),
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), internal::convert_index<StorageIndex>(nrhs), m_iparm.data(), m_msglvl,
rhs_ptr, x.derived().data());
manageErrorCode(error);
}
/** \ingroup PardisoSupport_Module
* \class PardisoLU
* \brief A sparse direct LU factorization and solver based on the PARDISO library
*
  * This class allows one to solve sparse linear problems A.X = B via a direct LU factorization
  * using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SparseLU
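  *
  * A minimal usage sketch; the matrix and right-hand side are placeholders:
  * \code
  * SparseMatrix<double> A;   // square and invertible
  * VectorXd b, x;
  * PardisoLU<SparseMatrix<double> > solver(A);
  * x = solver.solve(b);
  * \endcode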
*/
template<typename MatrixType>
class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
{
protected:
typedef PardisoImpl<PardisoLU> Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLU<MatrixType> >;
public:
using Base::compute;
using Base::solve;
PardisoLU()
: Base()
{
pardisoInit(Base::ScalarIsComplex ? 13 : 11);
}
explicit PardisoLU(const MatrixType& matrix)
: Base()
{
pardisoInit(Base::ScalarIsComplex ? 13 : 11);
compute(matrix);
}
protected:
void getMatrix(const MatrixType& matrix)
{
m_matrix = matrix;
m_matrix.makeCompressed();
}
};
/** \ingroup PardisoSupport_Module
* \class PardisoLLT
* \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library
*
  * This class allows one to solve sparse linear problems A.X = B via an LL^T Cholesky factorization
  * using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used.
* Upper|Lower can be used to tell both triangular parts can be used as input.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLLT
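  *
  * A minimal usage sketch (data shown is illustrative):
  * \code
  * SparseMatrix<double> A;   // selfadjoint positive definite; Upper part used by default
  * VectorXd b, x;
  * PardisoLLT<SparseMatrix<double> > solver(A);
  * x = solver.solve(b);
  * \endcode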
*/
template<typename MatrixType, int _UpLo>
class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
{
protected:
typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
public:
typedef typename Base::StorageIndex StorageIndex;
enum { UpLo = _UpLo };
using Base::compute;
PardisoLLT()
: Base()
{
pardisoInit(Base::ScalarIsComplex ? 4 : 2);
}
explicit PardisoLLT(const MatrixType& matrix)
: Base()
{
pardisoInit(Base::ScalarIsComplex ? 4 : 2);
compute(matrix);
}
protected:
void getMatrix(const MatrixType& matrix)
{
// PARDISO supports only upper, row-major matrices
PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
m_matrix.resize(matrix.rows(), matrix.cols());
m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
m_matrix.makeCompressed();
}
};
/** \ingroup PardisoSupport_Module
* \class PardisoLDLT
* \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library
*
  * This class allows one to solve sparse linear problems A.X = B via an LDL^T Cholesky factorization
  * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfadjoint and positive definite.
* For complex matrices, A can also be symmetric only, see the \a Options template parameter.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.
* Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.
* Upper|Lower can be used to tell both triangular parts can be used as input.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT
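  *
  * A minimal usage sketch (data shown is illustrative):
  * \code
  * SparseMatrix<double> A;   // selfadjoint; Upper triangular part used by default
  * VectorXd b, x;
  * PardisoLDLT<SparseMatrix<double> > solver(A);
  * x = solver.solve(b);
  * \endcode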
*/
template<typename MatrixType, int Options>
class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
{
protected:
typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
public:
typedef typename Base::StorageIndex StorageIndex;
using Base::compute;
enum { UpLo = Options&(Upper|Lower) };
PardisoLDLT()
: Base()
{
pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
}
explicit PardisoLDLT(const MatrixType& matrix)
: Base()
{
pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
compute(matrix);
}
void getMatrix(const MatrixType& matrix)
{
// PARDISO supports only upper, row-major matrices
PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
m_matrix.resize(matrix.rows(), matrix.cols());
m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
m_matrix.makeCompressed();
}
};
} // end namespace Eigen
#endif // EIGEN_PARDISOSUPPORT_H
| 20,032 | 35.825368 | 159 |
h
|
abess
|
abess-master/python/include/Eigen/src/QR/ColPivHouseholderQR.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
enum { Flags = 0 };
};
} // end namespace internal
/** \ingroup QR_Module
*
* \class ColPivHouseholderQR
*
* \brief Householder rank-revealing QR decomposition of a matrix with column-pivoting
*
* \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition
*
* This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R
* such that
* \f[
* \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R}
* \f]
* by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an
* upper triangular matrix.
*
* This decomposition performs column pivoting in order to be rank-revealing and improve
* numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR.
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::colPivHouseholderQr()
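  *
  * A minimal usage sketch; the sizes and data are placeholders:
  * \code
  * MatrixXd A(m, n);
  * VectorXd b(m);                   // ... fill A and b ...
  * ColPivHouseholderQR<MatrixXd> qr(A);
  * VectorXd x = qr.solve(b);        // a solution of A x = b, if one exists
  * Index r = qr.rank();             // revealed numerical rank
  * \endcode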
*/
template<typename _MatrixType> class ColPivHouseholderQR
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
typedef typename MatrixType::PlainObject PlainObject;
private:
typedef typename PermutationType::StorageIndex PermIndexType;
public:
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via ColPivHouseholderQR::compute(const MatrixType&).
*/
ColPivHouseholderQR()
: m_qr(),
m_hCoeffs(),
m_colsPermutation(),
m_colsTranspositions(),
m_temp(),
m_colNormsUpdated(),
m_colNormsDirect(),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa ColPivHouseholderQR()
*/
ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
m_colsPermutation(PermIndexType(cols)),
m_colsTranspositions(cols),
m_temp(cols),
m_colNormsUpdated(cols),
m_colNormsDirect(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
/** \brief Constructs a QR factorization from a given matrix
*
* This constructor computes the QR factorization of the matrix \a matrix by calling
* the method compute(). It is a short cut for:
*
* \code
* ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
* qr.compute(matrix);
* \endcode
*
* \sa compute()
*/
template<typename InputType>
explicit ColPivHouseholderQR(const EigenBase<InputType>& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_colsPermutation(PermIndexType(matrix.cols())),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),
m_colNormsUpdated(matrix.cols()),
m_colNormsDirect(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
compute(matrix.derived());
}
/** \brief Constructs a QR factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref.
*
* \sa ColPivHouseholderQR(const EigenBase&)
*/
template<typename InputType>
explicit ColPivHouseholderQR(EigenBase<InputType>& matrix)
: m_qr(matrix.derived()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_colsPermutation(PermIndexType(matrix.cols())),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),
m_colNormsUpdated(matrix.cols()),
m_colNormsDirect(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
computeInPlace();
}
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
* \param b the right-hand-side of the equation to solve.
*
* \returns a solution.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution
*
* Example: \include ColPivHouseholderQR_solve.cpp
* Output: \verbinclude ColPivHouseholderQR_solve.out
*/
template<typename Rhs>
inline const Solve<ColPivHouseholderQR, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return Solve<ColPivHouseholderQR, Rhs>(*this, b.derived());
}
HouseholderSequenceType householderQ() const;
HouseholderSequenceType matrixQ() const
{
return householderQ();
}
/** \returns a reference to the matrix where the Householder QR decomposition is stored
*/
const MatrixType& matrixQR() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return m_qr;
}
    /** \returns a reference to the matrix where the result of the Householder QR factorization is stored
* \warning The strict lower part of this matrix contains internal values.
* Only the upper triangular part should be referenced. To get it, use
* \code matrixR().template triangularView<Upper>() \endcode
* For rank-deficient matrices, use
* \code
* matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>()
* \endcode
*/
const MatrixType& matrixR() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return m_qr;
}
template<typename InputType>
ColPivHouseholderQR& compute(const EigenBase<InputType>& matrix);
/** \returns a const reference to the column permutation matrix */
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return m_colsPermutation;
}
/** \returns the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar absDeterminant() const;
/** \returns the natural log of the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \note This method is useful to work around the risk of overflow/underflow that's inherent
* to determinant computation.
*
* \sa absDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar logAbsDeterminant() const;
/** \returns the rank of the matrix of which *this is the QR decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result;
}
/** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index dimensionOfKernel() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return cols() - rank();
}
/** \returns true if the matrix of which *this is the QR decomposition represents an injective
* linear map, i.e. has trivial kernel; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInjective() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return rank() == cols();
}
/** \returns true if the matrix of which *this is the QR decomposition represents a surjective
* linear map; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isSurjective() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return rank() == rows();
}
/** \returns true if the matrix of which *this is the QR decomposition is invertible.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInvertible() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return isInjective() && isSurjective();
}
/** \returns the inverse of the matrix of which *this is the QR decomposition.
*
* \note If this matrix is not invertible, the returned matrix has undefined coefficients.
* Use isInvertible() to first determine whether this matrix is invertible.
*/
inline const Inverse<ColPivHouseholderQR> inverse() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return Inverse<ColPivHouseholderQR>(*this);
}
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
/** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
*
* For advanced uses only.
*/
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as rank(),
      * which need to determine when pivots are to be considered nonzero. This is not used for the
* QR decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold(). By default, this
* uses a formula to automatically determine a reasonable threshold.
* Once you have called the present method setThreshold(const RealScalar&),
* your value is used instead.
*
* \param threshold The new value to use as the threshold.
*
      * A pivot will be considered nonzero if its absolute value is strictly greater than
      * \f$ threshold \times \vert maxpivot \vert \f$,
      * where maxpivot is the pivot of largest absolute value.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
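      *
      * For instance (the threshold value shown is arbitrary):
      * \code
      * qr.setThreshold(1e-10);
      * Index r = qr.rank();   // rank determined with the prescribed threshold
      * \endcode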
*/
ColPivHouseholderQR& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}
/** Allows to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code qr.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
ColPivHouseholderQR& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
* Here nonzero is meant in the exact sense, not in a fuzzy sense.
* So that notion isn't really intrinsically interesting, but it is
* still useful when implementing algorithms.
*
* \sa rank()
*/
inline Index nonzeroPivots() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return m_nonzero_pivots;
}
/** \returns the absolute value of the biggest pivot, i.e. the biggest
* diagonal coefficient of R.
*/
RealScalar maxPivot() const { return m_maxpivot; }
    /** \brief Reports whether the QR factorization was successful.
*
* \note This function always returns \c Success. It is provided for compatibility
* with other factorization routines.
* \returns \c Success
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return Success;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
friend class CompleteOrthogonalDecomposition<MatrixType>;
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
MatrixType m_qr;
HCoeffsType m_hCoeffs;
PermutationType m_colsPermutation;
IntRowVectorType m_colsTranspositions;
RowVectorType m_temp;
RealRowVectorType m_colNormsUpdated;
RealRowVectorType m_colNormsDirect;
bool m_isInitialized, m_usePrescribedThreshold;
RealScalar m_prescribedThreshold, m_maxpivot;
Index m_nonzero_pivots;
Index m_det_pq;
};
template<typename MatrixType>
typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const
{
using std::abs;
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return m_qr.diagonal().cwiseAbs().array().log().sum();
}
/** Performs the QR factorization of the given matrix \a matrix. The result of
* the factorization is stored into \c *this, and a reference to \c *this
* is returned.
*
* \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&)
*/
template<typename MatrixType>
template<typename InputType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix)
{
m_qr = matrix.derived();
computeInPlace();
return *this;
}
template<typename MatrixType>
void ColPivHouseholderQR<MatrixType>::computeInPlace()
{
check_template_parameters();
// the column permutation is stored as int indices, so just to be sure:
eigen_assert(m_qr.cols()<=NumTraits<int>::highest());
using std::abs;
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = m_qr.diagonalSize();
m_hCoeffs.resize(size);
m_temp.resize(cols);
m_colsTranspositions.resize(m_qr.cols());
Index number_of_transpositions = 0;
m_colNormsUpdated.resize(cols);
m_colNormsDirect.resize(cols);
for (Index k = 0; k < cols; ++k) {
// colNormsDirect(k) caches the most recent directly computed norm of
// column k.
m_colNormsDirect.coeffRef(k) = m_qr.col(k).norm();
m_colNormsUpdated.coeffRef(k) = m_colNormsDirect.coeffRef(k);
}
RealScalar threshold_helper = numext::abs2<RealScalar>(m_colNormsUpdated.maxCoeff() * NumTraits<RealScalar>::epsilon()) / RealScalar(rows);
RealScalar norm_downdate_threshold = numext::sqrt(NumTraits<RealScalar>::epsilon());
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
for(Index k = 0; k < size; ++k)
{
// first, we look up in our table m_colNormsUpdated which column has the biggest norm
Index biggest_col_index;
RealScalar biggest_col_sq_norm = numext::abs2(m_colNormsUpdated.tail(cols-k).maxCoeff(&biggest_col_index));
biggest_col_index += k;
// Track the number of meaningful pivots but do not stop the decomposition to make
// sure that the initial matrix is properly reproduced. See bug 941.
if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
m_nonzero_pivots = k;
// apply the transposition to the columns
m_colsTranspositions.coeffRef(k) = biggest_col_index;
if(k != biggest_col_index) {
m_qr.col(k).swap(m_qr.col(biggest_col_index));
std::swap(m_colNormsUpdated.coeffRef(k), m_colNormsUpdated.coeffRef(biggest_col_index));
std::swap(m_colNormsDirect.coeffRef(k), m_colNormsDirect.coeffRef(biggest_col_index));
++number_of_transpositions;
}
// generate the householder vector, store it below the diagonal
RealScalar beta;
m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
// apply the householder transformation to the diagonal coefficient
m_qr.coeffRef(k,k) = beta;
// remember the maximum absolute value of diagonal coefficients
if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
// apply the householder transformation
m_qr.bottomRightCorner(rows-k, cols-k-1)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
// update our table of norms of the columns
for (Index j = k + 1; j < cols; ++j) {
// The following implements the stable norm downgrade step discussed in
// http://www.netlib.org/lapack/lawnspdf/lawn176.pdf
// and used in LAPACK routines xGEQPF and xGEQP3.
// See lines 278-297 in http://www.netlib.org/lapack/explore-html/dc/df4/sgeqpf_8f_source.html
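        // Writing t = |qr(k,j)| / colNormsUpdated(j), the new squared norm is
        // colNormsUpdated(j)^2 * (1 - t^2); the factored form (1 + t) * (1 - t)
        // used below is less prone to cancellation when t is close to 1.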
if (m_colNormsUpdated.coeffRef(j) != RealScalar(0)) {
RealScalar temp = abs(m_qr.coeffRef(k, j)) / m_colNormsUpdated.coeffRef(j);
temp = (RealScalar(1) + temp) * (RealScalar(1) - temp);
temp = temp < RealScalar(0) ? RealScalar(0) : temp;
RealScalar temp2 = temp * numext::abs2<RealScalar>(m_colNormsUpdated.coeffRef(j) /
m_colNormsDirect.coeffRef(j));
if (temp2 <= norm_downdate_threshold) {
// The updated norm has become too inaccurate so re-compute the column
// norm directly.
m_colNormsDirect.coeffRef(j) = m_qr.col(j).tail(rows - k - 1).norm();
m_colNormsUpdated.coeffRef(j) = m_colNormsDirect.coeffRef(j);
} else {
m_colNormsUpdated.coeffRef(j) *= numext::sqrt(temp);
}
}
}
}
m_colsPermutation.setIdentity(PermIndexType(cols));
for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
m_isInitialized = true;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType>
template<typename RhsType, typename DstType>
void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
eigen_assert(rhs.rows() == rows());
const Index nonzero_pivots = nonzeroPivots();
if(nonzero_pivots == 0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs);
// Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs)
.setLength(nonzero_pivots)
.transpose()
);
m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.solveInPlace(c.topRows(nonzero_pivots));
for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero();
}
#endif
namespace internal {
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename ColPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>
{
typedef ColPivHouseholderQR<MatrixType> QrType;
typedef Inverse<QrType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
} // end namespace internal
/** \returns the matrix Q as a sequence of householder transformations.
* You can extract the meaningful part only by using:
* \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
template<typename MatrixType>
typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
::householderQ() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
}
/** \return the column-pivoting Householder QR decomposition of \c *this.
*
* \sa class ColPivHouseholderQR
*/
template<typename Derived>
const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::colPivHouseholderQr() const
{
return ColPivHouseholderQR<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
| 24,881 | 37.045872 | 184 |
h
|
abess
|
abess-master/python/include/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Householder QR decomposition of a matrix with column pivoting based on
* LAPACKE_?geqp3 function.
********************************************************************************
*/
#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H
#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_QR_COLPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> template<typename InputType> inline \
ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \
ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \
const EigenBase<InputType>& matrix) \
\
{ \
using std::abs; \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
typedef MatrixType::RealScalar RealScalar; \
Index rows = matrix.rows();\
Index cols = matrix.cols();\
\
m_qr = matrix;\
Index size = m_qr.diagonalSize();\
m_hCoeffs.resize(size);\
\
m_colsTranspositions.resize(cols);\
/*Index number_of_transpositions = 0;*/ \
\
m_nonzero_pivots = 0; \
m_maxpivot = RealScalar(0);\
m_colsPermutation.resize(cols); \
m_colsPermutation.indices().setZero(); \
\
lapack_int lda = internal::convert_index<lapack_int,Index>(m_qr.outerStride()); \
lapack_int matrix_order = LAPACKE_COLROW; \
LAPACKE_##LAPACKE_PREFIX##geqp3( matrix_order, internal::convert_index<lapack_int,Index>(rows), internal::convert_index<lapack_int,Index>(cols), \
(LAPACKE_TYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (LAPACKE_TYPE*)m_hCoeffs.data()); \
m_isInitialized = true; \
m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
m_hCoeffs.adjointInPlace(); \
RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \
lapack_int *perm = m_colsPermutation.indices().data(); \
for(Index i=0;i<size;i++) { \
m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\
} \
for(Index i=0;i<cols;i++) perm[i]--;\
\
/*m_det_pq = (number_of_transpositions%2) ? -1 : 1; // TODO: It's not needed now; fix upon availability in Eigen */ \
\
return *this; \
}
EIGEN_LAPACKE_QR_COLPIV(double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(float, float, s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float, c, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(float, float, s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float, c, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H
| 4,662 | 46.581633 | 148 |
h
|
abess
|
abess-master/python/include/Eigen/src/QR/CompleteOrthogonalDecomposition.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H
#define EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H
namespace Eigen {
namespace internal {
template <typename _MatrixType>
struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
: traits<_MatrixType> {
enum { Flags = 0 };
};
} // end namespace internal
/** \ingroup QR_Module
*
* \class CompleteOrthogonalDecomposition
*
* \brief Complete orthogonal decomposition (COD) of a matrix.
*
* \param MatrixType the type of the matrix of which we are computing the COD.
*
* This class performs a rank-revealing complete orthogonal decomposition of a
* matrix \b A into matrices \b P, \b Q, \b T, and \b Z such that
* \f[
* \mathbf{A} \, \mathbf{P} = \mathbf{Q} \,
* \begin{bmatrix} \mathbf{T} & \mathbf{0} \\
* \mathbf{0} & \mathbf{0} \end{bmatrix} \, \mathbf{Z}
* \f]
* by using Householder transformations. Here, \b P is a permutation matrix,
* \b Q and \b Z are unitary matrices and \b T an upper triangular matrix of
* size rank-by-rank. \b A may be rank deficient.
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::completeOrthogonalDecomposition()
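  *
  * A minimal usage sketch for a minimum-norm least-squares solve; the sizes
  * and data are placeholders:
  * \code
  * MatrixXd A(m, n);                // possibly rank deficient
  * VectorXd b(m);
  * CompleteOrthogonalDecomposition<MatrixXd> cod(A);
  * VectorXd x = cod.solve(b);       // minimum-norm minimizer of |A x - b|
  * \endcode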
*/
template <typename _MatrixType>
class CompleteOrthogonalDecomposition {
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime>
PermutationType;
typedef typename internal::plain_row_type<MatrixType, Index>::type
IntRowVectorType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_row_type<MatrixType, RealScalar>::type
RealRowVectorType;
typedef HouseholderSequence<
MatrixType, typename internal::remove_all<
typename HCoeffsType::ConjugateReturnType>::type>
HouseholderSequenceType;
typedef typename MatrixType::PlainObject PlainObject;
private:
typedef typename PermutationType::Index PermIndexType;
public:
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via
* \c CompleteOrthogonalDecomposition::compute(const* MatrixType&).
*/
CompleteOrthogonalDecomposition() : m_cpqr(), m_zCoeffs(), m_temp() {}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa CompleteOrthogonalDecomposition()
*/
CompleteOrthogonalDecomposition(Index rows, Index cols)
: m_cpqr(rows, cols), m_zCoeffs((std::min)(rows, cols)), m_temp(cols) {}
/** \brief Constructs a complete orthogonal decomposition from a given
* matrix.
*
* This constructor computes the complete orthogonal decomposition of the
* matrix \a matrix by calling the method compute(). The default
* threshold for rank determination will be used. It is a short cut for:
*
* \code
* CompleteOrthogonalDecomposition<MatrixType> cod(matrix.rows(),
* matrix.cols());
* cod.setThreshold(Default);
* cod.compute(matrix);
* \endcode
*
* \sa compute()
*/
template <typename InputType>
explicit CompleteOrthogonalDecomposition(const EigenBase<InputType>& matrix)
: m_cpqr(matrix.rows(), matrix.cols()),
m_zCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_temp(matrix.cols())
{
compute(matrix.derived());
}
/** \brief Constructs a complete orthogonal decomposition from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref.
*
* \sa CompleteOrthogonalDecomposition(const EigenBase&)
*/
template<typename InputType>
explicit CompleteOrthogonalDecomposition(EigenBase<InputType>& matrix)
: m_cpqr(matrix.derived()),
m_zCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_temp(matrix.cols())
{
computeInPlace();
}
/** This method computes the minimum-norm solution X to a least squares
* problem \f[\mathrm{minimize} \|A X - B\|, \f] where \b A is the matrix of
* which \c *this is the complete orthogonal decomposition.
*
* \param b the right-hand sides of the problem to solve.
*
* \returns a solution.
*
*/
template <typename Rhs>
inline const Solve<CompleteOrthogonalDecomposition, Rhs> solve(
const MatrixBase<Rhs>& b) const {
eigen_assert(m_cpqr.m_isInitialized &&
"CompleteOrthogonalDecomposition is not initialized.");
return Solve<CompleteOrthogonalDecomposition, Rhs>(*this, b.derived());
}
HouseholderSequenceType householderQ(void) const;
HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); }
/** \returns the matrix \b Z.
*/
MatrixType matrixZ() const {
MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols());
applyZAdjointOnTheLeftInPlace(Z);
return Z.adjoint();
}
/** \returns a reference to the matrix where the complete orthogonal
* decomposition is stored
*/
const MatrixType& matrixQTZ() const { return m_cpqr.matrixQR(); }
/** \returns a reference to the matrix where the complete orthogonal
* decomposition is stored.
* \warning The strict lower part and \code cols() - rank() \endcode right
   * columns of this matrix contain internal values.
* Only the upper triangular part should be referenced. To get it, use
* \code matrixT().template triangularView<Upper>() \endcode
* For rank-deficient matrices, use
* \code
   * matrixT().topLeftCorner(rank(), rank()).template triangularView<Upper>()
* \endcode
*/
const MatrixType& matrixT() const { return m_cpqr.matrixQR(); }
template <typename InputType>
CompleteOrthogonalDecomposition& compute(const EigenBase<InputType>& matrix) {
// Compute the column pivoted QR factorization A P = Q R.
m_cpqr.compute(matrix);
computeInPlace();
return *this;
}
/** \returns a const reference to the column permutation matrix */
const PermutationType& colsPermutation() const {
return m_cpqr.colsPermutation();
}
/** \returns the absolute value of the determinant of the matrix of which
* *this is the complete orthogonal decomposition. It has only linear
* complexity (that is, O(n) where n is the dimension of the square matrix)
* as the complete orthogonal decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar absDeterminant() const;
/** \returns the natural log of the absolute value of the determinant of the
* matrix of which *this is the complete orthogonal decomposition. It has
* only linear complexity (that is, O(n) where n is the dimension of the
* square matrix) as the complete orthogonal decomposition has already been
* computed.
*
* \note This is only for square matrices.
*
* \note This method is useful to work around the risk of overflow/underflow
* that's inherent to determinant computation.
*
* \sa absDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar logAbsDeterminant() const;
/** \returns the rank of the matrix of which *this is the complete orthogonal
* decomposition.
*
* \note This method has to determine which pivots should be considered
* nonzero. For that, it uses the threshold value that you can control by
* calling setThreshold(const RealScalar&).
*/
inline Index rank() const { return m_cpqr.rank(); }
/** \returns the dimension of the kernel of the matrix of which *this is the
* complete orthogonal decomposition.
*
* \note This method has to determine which pivots should be considered
* nonzero. For that, it uses the threshold value that you can control by
* calling setThreshold(const RealScalar&).
*/
inline Index dimensionOfKernel() const { return m_cpqr.dimensionOfKernel(); }
/** \returns true if the matrix of which *this is the decomposition represents
* an injective linear map, i.e. has trivial kernel; false otherwise.
*
* \note This method has to determine which pivots should be considered
* nonzero. For that, it uses the threshold value that you can control by
* calling setThreshold(const RealScalar&).
*/
inline bool isInjective() const { return m_cpqr.isInjective(); }
/** \returns true if the matrix of which *this is the decomposition represents
* a surjective linear map; false otherwise.
*
* \note This method has to determine which pivots should be considered
* nonzero. For that, it uses the threshold value that you can control by
* calling setThreshold(const RealScalar&).
*/
inline bool isSurjective() const { return m_cpqr.isSurjective(); }
/** \returns true if the matrix of which *this is the complete orthogonal
* decomposition is invertible.
*
* \note This method has to determine which pivots should be considered
* nonzero. For that, it uses the threshold value that you can control by
* calling setThreshold(const RealScalar&).
*/
inline bool isInvertible() const { return m_cpqr.isInvertible(); }
/** \returns the pseudo-inverse of the matrix of which *this is the complete
* orthogonal decomposition.
   * \warning Do not compute \c this->pseudoInverse()*rhs to solve linear systems.
* It is more efficient and numerically stable to call \c this->solve(rhs).
*/
inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const
{
return Inverse<CompleteOrthogonalDecomposition>(*this);
}
inline Index rows() const { return m_cpqr.rows(); }
inline Index cols() const { return m_cpqr.cols(); }
/** \returns a const reference to the vector of Householder coefficients used
* to represent the factor \c Q.
*
* For advanced uses only.
*/
inline const HCoeffsType& hCoeffs() const { return m_cpqr.hCoeffs(); }
/** \returns a const reference to the vector of Householder coefficients
* used to represent the factor \c Z.
*
* For advanced uses only.
*/
const HCoeffsType& zCoeffs() const { return m_zCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as
   * rank(), which need to determine when pivots are to be considered nonzero.
   * Must be called before calling compute().
*
* When it needs to get the threshold value, Eigen calls threshold(). By
* default, this uses a formula to automatically determine a reasonable
* threshold. Once you have called the present method
* setThreshold(const RealScalar&), your value is used instead.
*
* \param threshold The new value to use as the threshold.
*
* A pivot will be considered nonzero if its absolute value is strictly
* greater than
   * \f$ threshold \times \vert maxpivot \vert \f$,
   * where maxpivot is the pivot of largest absolute value.
*
* If you want to come back to the default behavior, call
* setThreshold(Default_t)
*/
CompleteOrthogonalDecomposition& setThreshold(const RealScalar& threshold) {
m_cpqr.setThreshold(threshold);
return *this;
}
/** Allows to come back to the default behavior, letting Eigen use its default
* formula for determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code qr.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
CompleteOrthogonalDecomposition& setThreshold(Default_t) {
m_cpqr.setThreshold(Default);
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const { return m_cpqr.threshold(); }
/** \returns the number of nonzero pivots in the complete orthogonal
* decomposition. Here nonzero is meant in the exact sense, not in a
* fuzzy sense. So that notion isn't really intrinsically interesting,
* but it is still useful when implementing algorithms.
*
* \sa rank()
*/
inline Index nonzeroPivots() const { return m_cpqr.nonzeroPivots(); }
/** \returns the absolute value of the biggest pivot, i.e. the biggest
* diagonal coefficient of R.
*/
inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
/** \brief Reports whether the complete orthogonal decomposition was
   * successful.
*
   * \note This function always returns \c Success. It is provided for
   * compatibility with other factorization routines.
* \returns \c Success
*/
ComputationInfo info() const {
eigen_assert(m_cpqr.m_isInitialized && "Decomposition is not initialized.");
return Success;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC void _solve_impl(const RhsType& rhs, DstType& dst) const;
#endif
protected:
static void check_template_parameters() {
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
/** Overwrites \b rhs with \f$ \mathbf{Z}^* * \mathbf{rhs} \f$.
*/
template <typename Rhs>
void applyZAdjointOnTheLeftInPlace(Rhs& rhs) const;
ColPivHouseholderQR<MatrixType> m_cpqr;
HCoeffsType m_zCoeffs;
RowVectorType m_temp;
};
template <typename MatrixType>
typename MatrixType::RealScalar
CompleteOrthogonalDecomposition<MatrixType>::absDeterminant() const {
return m_cpqr.absDeterminant();
}
template <typename MatrixType>
typename MatrixType::RealScalar
CompleteOrthogonalDecomposition<MatrixType>::logAbsDeterminant() const {
return m_cpqr.logAbsDeterminant();
}
/** Performs the complete orthogonal decomposition of the matrix stored in
 * \c *this, overwriting it with the factored form.
*
* \sa class CompleteOrthogonalDecomposition,
* CompleteOrthogonalDecomposition(const MatrixType&)
*/
template <typename MatrixType>
void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()
{
check_template_parameters();
// the column permutation is stored as int indices, so just to be sure:
eigen_assert(m_cpqr.cols() <= NumTraits<int>::highest());
const Index rank = m_cpqr.rank();
const Index cols = m_cpqr.cols();
const Index rows = m_cpqr.rows();
m_zCoeffs.resize((std::min)(rows, cols));
m_temp.resize(cols);
if (rank < cols) {
// We have reduced the (permuted) matrix to the form
// [R11 R12]
// [ 0 R22]
// where R11 is r-by-r (r = rank) upper triangular, R12 is
// r-by-(n-r), and R22 is empty or the norm of R22 is negligible.
// We now compute the complete orthogonal decomposition by applying
// Householder transformations from the right to the upper trapezoidal
// matrix X = [R11 R12] to zero out R12 and obtain the factorization
// [R11 R12] = [T11 0] * Z, where T11 is r-by-r upper triangular and
// Z = Z(0) * Z(1) ... Z(r-1) is an n-by-n orthogonal matrix.
// We store the data representing Z in R12 and m_zCoeffs.
for (Index k = rank - 1; k >= 0; --k) {
if (k != rank - 1) {
// Given the API for Householder reflectors, it is more convenient if
// we swap the leading parts of columns k and r-1 (zero-based) to form
// the matrix X_k = [X(0:k, k), X(0:k, r:n)]
m_cpqr.m_qr.col(k).head(k + 1).swap(
m_cpqr.m_qr.col(rank - 1).head(k + 1));
}
// Construct Householder reflector Z(k) to zero out the last row of X_k,
// i.e. choose Z(k) such that
// [X(k, k), X(k, r:n)] * Z(k) = [beta, 0, .., 0].
RealScalar beta;
m_cpqr.m_qr.row(k)
.tail(cols - rank + 1)
.makeHouseholderInPlace(m_zCoeffs(k), beta);
m_cpqr.m_qr(k, rank - 1) = beta;
if (k > 0) {
// Apply Z(k) to the first k rows of X_k
m_cpqr.m_qr.topRightCorner(k, cols - rank + 1)
.applyHouseholderOnTheRight(
m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k),
&m_temp(0));
}
if (k != rank - 1) {
// Swap X(0:k,k) back to its proper location.
m_cpqr.m_qr.col(k).head(k + 1).swap(
m_cpqr.m_qr.col(rank - 1).head(k + 1));
}
}
}
}
template <typename MatrixType>
template <typename Rhs>
void CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace(
Rhs& rhs) const {
const Index cols = this->cols();
const Index nrhs = rhs.cols();
const Index rank = this->rank();
Matrix<typename MatrixType::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));
for (Index k = 0; k < rank; ++k) {
if (k != rank - 1) {
rhs.row(k).swap(rhs.row(rank - 1));
}
rhs.middleRows(rank - 1, cols - rank + 1)
.applyHouseholderOnTheLeft(
matrixQTZ().row(k).tail(cols - rank).adjoint(), zCoeffs()(k),
&temp(0));
if (k != rank - 1) {
rhs.row(k).swap(rhs.row(rank - 1));
}
}
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename _MatrixType>
template <typename RhsType, typename DstType>
void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
const RhsType& rhs, DstType& dst) const {
eigen_assert(rhs.rows() == this->rows());
const Index rank = this->rank();
if (rank == 0) {
dst.setZero();
return;
}
// Compute c = Q^* * rhs
// Note that the matrix Q = H_0^* H_1^*... so its inverse is
// Q^* = (H_0 H_1 ...)^T
typename RhsType::PlainObject c(rhs);
c.applyOnTheLeft(
householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose());
// Solve T z = c(1:rank, :)
dst.topRows(rank) = matrixT()
.topLeftCorner(rank, rank)
.template triangularView<Upper>()
.solve(c.topRows(rank));
const Index cols = this->cols();
if (rank < cols) {
// Compute y = Z^* * [ z ]
// [ 0 ]
dst.bottomRows(cols - rank).setZero();
applyZAdjointOnTheLeftInPlace(dst);
}
// Undo permutation to get x = P^{-1} * y.
dst = colsPermutation() * dst;
}
#endif
namespace internal {
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename CompleteOrthogonalDecomposition<MatrixType>::Scalar>, Dense2Dense>
{
typedef CompleteOrthogonalDecomposition<MatrixType> CodType;
typedef Inverse<CodType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows()));
}
};
} // end namespace internal
/** \returns the matrix Q as a sequence of Householder transformations */
template <typename MatrixType>
typename CompleteOrthogonalDecomposition<MatrixType>::HouseholderSequenceType
CompleteOrthogonalDecomposition<MatrixType>::householderQ() const {
return m_cpqr.householderQ();
}
/** \return the complete orthogonal decomposition of \c *this.
*
* \sa class CompleteOrthogonalDecomposition
*/
template <typename Derived>
const CompleteOrthogonalDecomposition<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::completeOrthogonalDecomposition() const {
return CompleteOrthogonalDecomposition<PlainObject>(eval());
}
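// Illustrative one-liner (added commentary; assumes a dense MatrixXd A and a
// compatible VectorXd b): least-squares solve via the MatrixBase shortcut.
//   VectorXd x = A.completeOrthogonalDecomposition().solve(b);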
} // end namespace Eigen
#endif // EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H
| 20,805 | 35.955595 | 208 | h |
| abess | abess-master/python/include/Eigen/src/QR/FullPivHouseholderQR.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
#define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<FullPivHouseholderQR<_MatrixType> >
: traits<_MatrixType>
{
enum { Flags = 0 };
};
template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType;
template<typename MatrixType>
struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
{
typedef typename MatrixType::PlainObject ReturnType;
};
} // end namespace internal
/** \ingroup QR_Module
*
* \class FullPivHouseholderQR
*
* \brief Householder rank-revealing QR decomposition of a matrix with full pivoting
*
* \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition
*
* This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b P', \b Q and \b R
* such that
* \f[
* \mathbf{P} \, \mathbf{A} \, \mathbf{P}' = \mathbf{Q} \, \mathbf{R}
* \f]
 * by using Householder transformations. Here, \b P and \b P' are permutation matrices, \b Q is a unitary matrix,
 * and \b R is an upper triangular matrix.
*
* This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal
* numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR.
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::fullPivHouseholderQr()
*/
template<typename _MatrixType> class FullPivHouseholderQR
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<StorageIndex, 1,
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
typedef typename MatrixType::PlainObject PlainObject;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via FullPivHouseholderQR::compute(const MatrixType&).
*/
FullPivHouseholderQR()
: m_qr(),
m_hCoeffs(),
m_rows_transpositions(),
m_cols_transpositions(),
m_cols_permutation(),
m_temp(),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa FullPivHouseholderQR()
*/
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
m_rows_transpositions((std::min)(rows,cols)),
m_cols_transpositions((std::min)(rows,cols)),
m_cols_permutation(cols),
m_temp(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
/** \brief Constructs a QR factorization from a given matrix
*
* This constructor computes the QR factorization of the matrix \a matrix by calling
* the method compute(). It is a short cut for:
*
* \code
* FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
* qr.compute(matrix);
* \endcode
*
* \sa compute()
*/
template<typename InputType>
explicit FullPivHouseholderQR(const EigenBase<InputType>& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_permutation(matrix.cols()),
m_temp(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
compute(matrix.derived());
}
/** \brief Constructs a QR factorization from a given matrix
*
    * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is an Eigen::Ref.
*
* \sa FullPivHouseholderQR(const EigenBase&)
*/
template<typename InputType>
explicit FullPivHouseholderQR(EigenBase<InputType>& matrix)
: m_qr(matrix.derived()),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_permutation(matrix.cols()),
m_temp(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
computeInPlace();
}
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* \c *this is the QR decomposition.
*
* \param b the right-hand-side of the equation to solve.
*
    * \returns the exact or least-squares solution if the rank is greater than or equal to the number of columns of A,
* and an arbitrary solution otherwise.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution
*
* Example: \include FullPivHouseholderQR_solve.cpp
* Output: \verbinclude FullPivHouseholderQR_solve.out
*/
template<typename Rhs>
inline const Solve<FullPivHouseholderQR, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return Solve<FullPivHouseholderQR, Rhs>(*this, b.derived());
}
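    // Inline usage sketch (added commentary; assumes `using namespace Eigen`,
    // a MatrixXd A and a VectorXd b with b.rows() == A.rows()):
    //   FullPivHouseholderQR<MatrixXd> qr(A);
    //   VectorXd x = qr.solve(b);
    //   double relative_error = (A * x - b).norm() / b.norm();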
/** \returns Expression object representing the matrix Q
*/
MatrixQReturnType matrixQ(void) const;
/** \returns a reference to the matrix where the Householder QR decomposition is stored
*/
const MatrixType& matrixQR() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_qr;
}
template<typename InputType>
FullPivHouseholderQR& compute(const EigenBase<InputType>& matrix);
/** \returns a const reference to the column permutation matrix */
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_cols_permutation;
}
/** \returns a const reference to the vector of indices representing the rows transpositions */
const IntDiagSizeVectorType& rowsTranspositions() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_rows_transpositions;
}
/** \returns the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar absDeterminant() const;
/** \returns the natural log of the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \note This method is useful to work around the risk of overflow/underflow that's inherent
* to determinant computation.
*
* \sa absDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar logAbsDeterminant() const;
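    // Illustrative sketch (added): for large matrices, prefer the logarithmic
    // variant to avoid overflow/underflow:
    //   double logAbsDet = qr.logAbsDeterminant();  // safe
    //   double absDet    = qr.absDeterminant();     // may overflow/underflow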
/** \returns the rank of the matrix of which *this is the QR decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result;
}
/** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index dimensionOfKernel() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return cols() - rank();
}
/** \returns true if the matrix of which *this is the QR decomposition represents an injective
* linear map, i.e. has trivial kernel; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInjective() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return rank() == cols();
}
/** \returns true if the matrix of which *this is the QR decomposition represents a surjective
* linear map; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isSurjective() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return rank() == rows();
}
/** \returns true if the matrix of which *this is the QR decomposition is invertible.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInvertible() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return isInjective() && isSurjective();
}
/** \returns the inverse of the matrix of which *this is the QR decomposition.
*
* \note If this matrix is not invertible, the returned matrix has undefined coefficients.
* Use isInvertible() to first determine whether this matrix is invertible.
*/
inline const Inverse<FullPivHouseholderQR> inverse() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return Inverse<FullPivHouseholderQR>(*this);
}
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
/** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
*
* For advanced uses only.
*/
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
    /** Allows prescribing a threshold to be used by certain methods, such as rank(),
     * which need to determine when pivots are to be considered nonzero. This is not used for the
* QR decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold(). By default, this
* uses a formula to automatically determine a reasonable threshold.
* Once you have called the present method setThreshold(const RealScalar&),
* your value is used instead.
*
* \param threshold The new value to use as the threshold.
*
     * A pivot will be considered nonzero if its absolute value is strictly greater than
     * \f$ threshold \times \vert maxpivot \vert \f$,
     * where maxpivot is the biggest pivot.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
*/
FullPivHouseholderQR& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}
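    // Illustrative sketch (added; the tolerance value is hypothetical):
    //   FullPivHouseholderQR<MatrixXd> qr(A);
    //   qr.setThreshold(1e-10);
    //   bool invertible = qr.isInvertible();  // uses |pivot| > 1e-10 * |maxpivot|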
    /** Allows reverting to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code qr.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
FullPivHouseholderQR& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
* Here nonzero is meant in the exact sense, not in a fuzzy sense.
* So that notion isn't really intrinsically interesting, but it is
* still useful when implementing algorithms.
*
* \sa rank()
*/
inline Index nonzeroPivots() const
{
      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_nonzero_pivots;
}
/** \returns the absolute value of the biggest pivot, i.e. the biggest
     * diagonal coefficient of R.
*/
RealScalar maxPivot() const { return m_maxpivot; }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntDiagSizeVectorType m_rows_transpositions;
IntDiagSizeVectorType m_cols_transpositions;
PermutationType m_cols_permutation;
RowVectorType m_temp;
bool m_isInitialized, m_usePrescribedThreshold;
RealScalar m_prescribedThreshold, m_maxpivot;
Index m_nonzero_pivots;
RealScalar m_precision;
Index m_det_pq;
};
template<typename MatrixType>
typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
{
using std::abs;
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return m_qr.diagonal().cwiseAbs().array().log().sum();
}
/** Performs the QR factorization of the given matrix \a matrix. The result of
* the factorization is stored into \c *this, and a reference to \c *this
* is returned.
*
* \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)
*/
template<typename MatrixType>
template<typename InputType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix)
{
m_qr = matrix.derived();
computeInPlace();
return *this;
}
template<typename MatrixType>
void FullPivHouseholderQR<MatrixType>::computeInPlace()
{
check_template_parameters();
using std::abs;
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = (std::min)(rows,cols);
m_hCoeffs.resize(size);
m_temp.resize(cols);
m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size);
m_rows_transpositions.resize(size);
m_cols_transpositions.resize(size);
Index number_of_transpositions = 0;
RealScalar biggest(0);
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
for (Index k = 0; k < size; ++k)
{
Index row_of_biggest_in_corner, col_of_biggest_in_corner;
typedef internal::scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
Score score = m_qr.bottomRightCorner(rows-k, cols-k)
.unaryExpr(Scoring())
.maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
row_of_biggest_in_corner += k;
col_of_biggest_in_corner += k;
RealScalar biggest_in_corner = internal::abs_knowing_score<Scalar>()(m_qr(row_of_biggest_in_corner, col_of_biggest_in_corner), score);
if(k==0) biggest = biggest_in_corner;
// if the corner is negligible, then we have less than full rank, and we can finish early
if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
{
m_nonzero_pivots = k;
for(Index i = k; i < size; i++)
{
m_rows_transpositions.coeffRef(i) = i;
m_cols_transpositions.coeffRef(i) = i;
m_hCoeffs.coeffRef(i) = Scalar(0);
}
break;
}
m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
if(k != row_of_biggest_in_corner) {
m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
++number_of_transpositions;
}
if(k != col_of_biggest_in_corner) {
m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner));
++number_of_transpositions;
}
RealScalar beta;
m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
m_qr.coeffRef(k,k) = beta;
// remember the maximum absolute value of diagonal coefficients
if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
m_qr.bottomRightCorner(rows-k, cols-k-1)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
}
m_cols_permutation.setIdentity(cols);
for(Index k = 0; k < size; ++k)
m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
m_isInitialized = true;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType>
template<typename RhsType, typename DstType>
void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
eigen_assert(rhs.rows() == rows());
const Index l_rank = rank();
// FIXME introduce nonzeroPivots() and use it here. and more generally,
// make the same improvements in this dec as in FullPivLU.
if(l_rank==0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs);
Matrix<Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());
for (Index k = 0; k < l_rank; ++k)
{
Index remainingSize = rows()-k;
c.row(k).swap(c.row(m_rows_transpositions.coeff(k)));
c.bottomRightCorner(remainingSize, rhs.cols())
.applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1),
m_hCoeffs.coeff(k), &temp.coeffRef(0));
}
m_qr.topLeftCorner(l_rank, l_rank)
.template triangularView<Upper>()
.solveInPlace(c.topRows(l_rank));
for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i);
for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero();
}
#endif
namespace internal {
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>
{
typedef FullPivHouseholderQR<MatrixType> QrType;
typedef Inverse<QrType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
/** \ingroup QR_Module
*
* \brief Expression type for return value of FullPivHouseholderQR::matrixQ()
*
* \tparam MatrixType type of underlying dense matrix
*/
template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType
: public ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
{
public:
typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,
MatrixType::MaxRowsAtCompileTime> WorkVectorType;
FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr,
const HCoeffsType& hCoeffs,
const IntDiagSizeVectorType& rowsTranspositions)
: m_qr(qr),
m_hCoeffs(hCoeffs),
m_rowsTranspositions(rowsTranspositions)
{}
template <typename ResultType>
void evalTo(ResultType& result) const
{
const Index rows = m_qr.rows();
WorkVectorType workspace(rows);
evalTo(result, workspace);
}
template <typename ResultType>
void evalTo(ResultType& result, WorkVectorType& workspace) const
{
using numext::conj;
// compute the product H'_0 H'_1 ... H'_n-1,
// where H_k is the k-th Householder transformation I - h_k v_k v_k'
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
const Index rows = m_qr.rows();
const Index cols = m_qr.cols();
const Index size = (std::min)(rows, cols);
workspace.resize(rows);
result.setIdentity(rows, rows);
for (Index k = size-1; k >= 0; k--)
{
result.block(k, k, rows-k, rows-k)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
result.row(k).swap(result.row(m_rowsTranspositions.coeff(k)));
}
}
Index rows() const { return m_qr.rows(); }
Index cols() const { return m_qr.rows(); }
protected:
typename MatrixType::Nested m_qr;
typename HCoeffsType::Nested m_hCoeffs;
typename IntDiagSizeVectorType::Nested m_rowsTranspositions;
};
// template<typename MatrixType>
// struct evaluator<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
// : public evaluator<ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> > >
// {};
} // end namespace internal
template<typename MatrixType>
inline typename FullPivHouseholderQR<MatrixType>::MatrixQReturnType FullPivHouseholderQR<MatrixType>::matrixQ() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions);
}
/** \return the full-pivoting Householder QR decomposition of \c *this.
*
* \sa class FullPivHouseholderQR
*/
template<typename Derived>
const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivHouseholderQr() const
{
return FullPivHouseholderQR<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
| 25,478 | 36.635155 | 186 | h |
| abess | abess-master/python/include/Eigen/src/QR/HouseholderQR.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Benoit Jacob <[email protected]>
// Copyright (C) 2010 Vincent Lejeune
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_QR_H
#define EIGEN_QR_H
namespace Eigen {
/** \ingroup QR_Module
*
*
* \class HouseholderQR
*
* \brief Householder QR decomposition of a matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition
*
* This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
* such that
* \f[
* \mathbf{A} = \mathbf{Q} \, \mathbf{R}
* \f]
 * by using Householder transformations. Here, \b Q is a unitary matrix and \b R an upper triangular matrix.
* The result is stored in a compact way compatible with LAPACK.
*
* Note that no pivoting is performed. This is \b not a rank-revealing decomposition.
* If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.
*
* This Householder QR decomposition is faster, but less numerically stable and less feature-full than
* FullPivHouseholderQR or ColPivHouseholderQR.
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::householderQr()
*/
template<typename _MatrixType> class HouseholderQR
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via HouseholderQR::compute(const MatrixType&).
*/
HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa HouseholderQR()
*/
HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
m_temp(cols),
m_isInitialized(false) {}
/** \brief Constructs a QR factorization from a given matrix
*
* This constructor computes the QR factorization of the matrix \a matrix by calling
* the method compute(). It is a short cut for:
*
* \code
* HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
* qr.compute(matrix);
* \endcode
*
* \sa compute()
*/
template<typename InputType>
explicit HouseholderQR(const EigenBase<InputType>& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false)
{
compute(matrix.derived());
}
/** \brief Constructs a QR factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when
    * \c MatrixType is an Eigen::Ref.
*
* \sa HouseholderQR(const EigenBase&)
*/
template<typename InputType>
explicit HouseholderQR(EigenBase<InputType>& matrix)
: m_qr(matrix.derived()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false)
{
computeInPlace();
}
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
*
* \param b the right-hand-side of the equation to solve.
*
* \returns a solution.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution
*
* Example: \include HouseholderQR_solve.cpp
* Output: \verbinclude HouseholderQR_solve.out
*/
template<typename Rhs>
inline const Solve<HouseholderQR, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
return Solve<HouseholderQR, Rhs>(*this, b.derived());
}
/** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
*
* The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.
* Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:
*
* Example: \include HouseholderQR_householderQ.cpp
* Output: \verbinclude HouseholderQR_householderQ.out
*/
HouseholderSequenceType householderQ() const
{
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
}
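    // Illustrative sketch (added; assumes a tall MatrixXd A): recovering the
    // thin factor Q and the triangular factor R.
    //   HouseholderQR<MatrixXd> qr(A);
    //   MatrixXd thinQ = qr.householderQ() * MatrixXd::Identity(A.rows(), A.cols());
    //   MatrixXd R = qr.matrixQR().topRows(A.cols()).triangularView<Upper>();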
/** \returns a reference to the matrix where the Householder QR decomposition is stored
* in a LAPACK-compatible way.
*/
const MatrixType& matrixQR() const
{
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
return m_qr;
}
template<typename InputType>
HouseholderQR& compute(const EigenBase<InputType>& matrix) {
m_qr = matrix.derived();
computeInPlace();
return *this;
}
/** \returns the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar absDeterminant() const;
/** \returns the natural log of the absolute value of the determinant of the matrix of which
* *this is the QR decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the QR decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \note This method is useful to work around the risk of overflow/underflow that's inherent
* to determinant computation.
*
* \sa absDeterminant(), MatrixBase::determinant()
*/
typename MatrixType::RealScalar logAbsDeterminant() const;
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
/** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
*
* For advanced uses only.
*/
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
MatrixType m_qr;
HCoeffsType m_hCoeffs;
RowVectorType m_temp;
bool m_isInitialized;
};
template<typename MatrixType>
typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
{
using std::abs;
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const
{
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
return m_qr.diagonal().cwiseAbs().array().log().sum();
}
namespace internal {
/** \internal */
template<typename MatrixQR, typename HCoeffs>
void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
{
typedef typename MatrixQR::Scalar Scalar;
typedef typename MatrixQR::RealScalar RealScalar;
Index rows = mat.rows();
Index cols = mat.cols();
Index size = (std::min)(rows,cols);
eigen_assert(hCoeffs.size() == size);
typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(cols);
tempData = tempVector.data();
}
for(Index k = 0; k < size; ++k)
{
Index remainingRows = rows - k;
Index remainingCols = cols - k - 1;
RealScalar beta;
mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
mat.coeffRef(k,k) = beta;
// apply H to remaining part of m_qr from the left
mat.bottomRightCorner(remainingRows, remainingCols)
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
}
}
/** \internal */
template<typename MatrixQR, typename HCoeffs,
typename MatrixQRScalar = typename MatrixQR::Scalar,
bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
struct householder_qr_inplace_blocked
{
    // This is specialized for LAPACK-supported Scalar types in HouseholderQR_LAPACKE.h
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32,
typename MatrixQR::Scalar* tempData = 0)
{
typedef typename MatrixQR::Scalar Scalar;
typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
Index rows = mat.rows();
Index cols = mat.cols();
Index size = (std::min)(rows, cols);
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(cols);
tempData = tempVector.data();
}
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
for (k = 0; k < size; k += blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index tcols = cols - k - bs; // trailing columns
Index brows = rows-k; // rows of the block
// partition the matrix:
// A00 | A01 | A02
// mat = A10 | A11 | A12
// A20 | A21 | A22
      // and perform the QR decomposition of the panel [A11^T A21^T]^T
      // and update the trailing panel [A12^T A22^T]^T using level 3 operations
      // (note: the local variable A21_22 below actually holds this trailing panel).
      // Finally, the algorithm continues on A22.
BlockType A11_21 = mat.block(k,k,brows,bs);
Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
if(tcols)
{
BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment, false); // false == backward
}
}
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType>
template<typename RhsType, typename DstType>
void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
const Index rank = (std::min)(rows(), cols());
eigen_assert(rhs.rows() == rows());
typename RhsType::PlainObject c(rhs);
// Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
c.applyOnTheLeft(householderSequence(
m_qr.leftCols(rank),
m_hCoeffs.head(rank)).transpose()
);
m_qr.topLeftCorner(rank, rank)
.template triangularView<Upper>()
.solveInPlace(c.topRows(rank));
dst.topRows(rank) = c.topRows(rank);
dst.bottomRows(cols()-rank).setZero();
}
#endif
/** Performs the QR factorization of the matrix stored in \c *this,
 * overwriting it with the factored form.
*
* \sa class HouseholderQR, HouseholderQR(const MatrixType&)
*/
template<typename MatrixType>
void HouseholderQR<MatrixType>::computeInPlace()
{
check_template_parameters();
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = (std::min)(rows,cols);
m_hCoeffs.resize(size);
m_temp.resize(cols);
internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data());
m_isInitialized = true;
}
/** \return the Householder QR decomposition of \c *this.
*
* \sa class HouseholderQR
*/
template<typename Derived>
const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::householderQr() const
{
return HouseholderQR<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_QR_H
| 14,022 | 33.202439 | 176 | h |
| abess | abess-master/python/include/Eigen/src/QR/HouseholderQR_LAPACKE.h |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Householder QR decomposition of a matrix w/o pivoting based on
* LAPACKE_?geqrf function.
********************************************************************************
*/
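// Usage note (added commentary, not part of the original header): these
// specializations take effect when Eigen is built with EIGEN_USE_LAPACKE
// defined and linked against a LAPACKE implementation, e.g.
//   g++ -DEIGEN_USE_LAPACKE main.cpp -llapacke
// after which HouseholderQR::compute() dispatches to LAPACKE_?geqrf.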
#ifndef EIGEN_QR_LAPACKE_H
#define EIGEN_QR_LAPACKE_H
namespace Eigen {
namespace internal {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
template<typename MatrixQR, typename HCoeffs> \
struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
{ \
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \
typename MatrixQR::Scalar* = 0) \
{ \
lapack_int m = (lapack_int) mat.rows(); \
lapack_int n = (lapack_int) mat.cols(); \
lapack_int lda = (lapack_int) mat.outerStride(); \
lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \
hCoeffs.adjointInPlace(); \
} \
};
EIGEN_LAPACKE_QR_NOPIV(double, double, d)
EIGEN_LAPACKE_QR_NOPIV(float, float, s)
EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z)
EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_QR_LAPACKE_H
| 2,993 | 42.391304 | 122 | h |
| abess | abess-master/python/include/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire Nuentsa <[email protected]>
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SUITESPARSEQRSUPPORT_H
#define EIGEN_SUITESPARSEQRSUPPORT_H
namespace Eigen {
template<typename MatrixType> class SPQR;
template<typename SPQRType> struct SPQRMatrixQReturnType;
template<typename SPQRType> struct SPQRMatrixQTransposeReturnType;
template <typename SPQRType, typename Derived> struct SPQR_QProduct;
namespace internal {
template <typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> >
{
typedef typename SPQRType::MatrixType ReturnType;
};
template <typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> >
{
typedef typename SPQRType::MatrixType ReturnType;
};
template <typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> >
{
typedef typename Derived::PlainObject ReturnType;
};
} // End namespace internal
/**
* \ingroup SPQRSupport_Module
* \class SPQR
* \brief Sparse QR factorization based on SuiteSparseQR library
*
* This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition
  * of sparse matrices. The result is then used to solve linear least-squares systems.
  * A QR factorization is returned such that A*P = Q*R, where:
*
* P is the column permutation. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as Householder reflectors.
* Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
* You can then apply it to a vector.
*
  * R is the sparse triangular factor. Use matrixR() to get it as a SparseMatrix.
  * NOTE: the index type of R is always SuiteSparse_long. You can get it with SPQR::StorageIndex.
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
*
* \implsparsesolverconcept
*
*
*/
template<typename _MatrixType>
class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
{
protected:
typedef SparseSolverBase<SPQR<_MatrixType> > Base;
using Base::m_isInitialized;
public:
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
    typedef SuiteSparse_long StorageIndex;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> MatrixType;
typedef Map<PermutationMatrix<Dynamic, Dynamic, StorageIndex> > PermutationType;
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
public:
SPQR()
: m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
explicit SPQR(const _MatrixType& matrix)
: m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
}
~SPQR()
{
SPQR_free();
cholmod_l_finish(&m_cc);
}
void SPQR_free()
{
cholmod_l_free_sparse(&m_H, &m_cc);
cholmod_l_free_sparse(&m_cR, &m_cc);
cholmod_l_free_dense(&m_HTau, &m_cc);
std::free(m_E);
std::free(m_HPinv);
}
void compute(const _MatrixType& matrix)
{
if(m_isInitialized) SPQR_free();
MatrixType mat(matrix);
/* Compute the default threshold as in MatLab, see:
       * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
       * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
*/
RealScalar pivotThreshold = m_tolerance;
if(m_useDefaultThreshold)
{
RealScalar max2Norm = 0.0;
for (int j = 0; j < mat.cols(); j++) max2Norm = numext::maxi(max2Norm, mat.col(j).norm());
if(max2Norm==RealScalar(0))
max2Norm = RealScalar(1);
pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
}
cholmod_sparse A;
A = viewAsCholmod(mat);
m_rows = matrix.rows();
Index col = matrix.cols();
m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
if (!m_cR)
{
m_info = NumericalIssue;
m_isInitialized = false;
return;
}
m_info = Success;
m_isInitialized = true;
m_isRUpToDate = false;
}
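    // Illustrative sketch (added; assumes SuiteSparse is available, plus a
    // column-major SparseMatrix<double> A and a dense VectorXd b):
    //   SPQR<SparseMatrix<double> > spqr(A);
    //   VectorXd x = spqr.solve(b);   // sparse least-squares solve
    //   Index r = spqr.rank();        // revealed numerical rank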
/**
* Get the number of rows of the input matrix and the Q matrix
*/
inline Index rows() const {return m_rows; }
/**
* Get the number of columns of the input matrix.
*/
inline Index cols() const { return m_cR->ncol; }
template<typename Rhs, typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
eigen_assert(b.cols()==1 && "This method is for vectors only");
//Compute Q^T * b
typename Dest::PlainObject y, y2;
y = matrixQ().transpose() * b;
// Solves with the triangular matrix R
Index rk = this->rank();
y2 = y;
y.resize((std::max)(cols(),Index(y.rows())),y.cols());
y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
// Apply the column permutation
// colsPermutation() performs a copy of the permutation,
// so let's apply it manually:
for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
// y.bottomRows(y.rows()-rk).setZero();
// dest = colsPermutation() * y.topRows(cols());
m_info = Success;
}
/** \returns the sparse triangular factor R. It is a sparse matrix
*/
const MatrixType matrixR() const
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
if(!m_isRUpToDate) {
m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::StorageIndex>(*m_cR);
m_isRUpToDate = true;
}
return m_R;
}
/// Get an expression of the matrix Q
SPQRMatrixQReturnType<SPQR> matrixQ() const
{
return SPQRMatrixQReturnType<SPQR>(*this);
}
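    // Illustrative sketch (added): apply Q^T to a dense vector without ever
    // forming Q explicitly:
    //   VectorXd qtb = spqr.matrixQ().transpose() * b;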
/// Get the permutation that was applied to columns of A
PermutationType colsPermutation() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return PermutationType(m_E, m_cR->ncol);
}
/**
* Gets the rank of the matrix.
     * It should be equal to matrixR().cols() if the matrix is full rank.
*/
Index rank() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_cc.SPQR_istat[4];
}
/// Set the fill-reducing ordering method to be used
void setSPQROrdering(int ord) { m_ordering = ord;}
    /// Set the tolerance tol to treat columns with 2-norm <= tol as zero
void setPivotThreshold(const RealScalar& tol)
{
m_useDefaultThreshold = false;
m_tolerance = tol;
}
/** \returns a pointer to the SPQR workspace */
cholmod_common *cholmodCommon() const { return &m_cc; }
/** \brief Reports whether previous computation was successful.
*
     * \returns \c Success if computation was successful,
     * \c NumericalIssue if the sparse QR cannot be computed
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
protected:
bool m_analysisIsOk;
bool m_factorizationIsOk;
mutable bool m_isRUpToDate;
mutable ComputationInfo m_info;
int m_ordering; // Ordering method to use, see SPQR's manual
int m_allow_tol; // Allow to use some tolerance during numerical factorization.
RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero
mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format
mutable MatrixType m_R; // The sparse matrix R in Eigen format
mutable StorageIndex *m_E; // The permutation applied to columns
mutable cholmod_sparse *m_H; //The householder vectors
mutable StorageIndex *m_HPinv; // The row permutation of H
mutable cholmod_dense *m_HTau; // The Householder coefficients
mutable Index m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
bool m_useDefaultThreshold; // Use default threshold
Index m_rows;
template<typename ,typename > friend struct SPQR_QProduct;
};
template <typename SPQRType, typename Derived>
struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >
{
typedef typename SPQRType::Scalar Scalar;
typedef typename SPQRType::StorageIndex StorageIndex;
//Define the constructor to get reference to argument types
SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}
inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
inline Index cols() const { return m_other.cols(); }
// Assign to a vector
template<typename ResType>
void evalTo(ResType& res) const
{
cholmod_dense y_cd;
cholmod_dense *x_cd;
int method = m_transpose ? SPQR_QTX : SPQR_QX;
cholmod_common *cc = m_spqr.cholmodCommon();
y_cd = viewAsCholmod(m_other.const_cast_derived());
x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
cholmod_l_free_dense(&x_cd, cc);
}
const SPQRType& m_spqr;
const Derived& m_other;
bool m_transpose;
};
template<typename SPQRType>
struct SPQRMatrixQReturnType{
SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
template<typename Derived>
SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
{
return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(),false);
}
SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const
{
return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
}
// To use for operations with the transpose of Q
SPQRMatrixQTransposeReturnType<SPQRType> transpose() const
{
return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
}
const SPQRType& m_spqr;
};
template<typename SPQRType>
struct SPQRMatrixQTransposeReturnType{
SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
template<typename Derived>
SPQR_QProduct<SPQRType,Derived> operator*(const MatrixBase<Derived>& other)
{
return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(), true);
}
const SPQRType& m_spqr;
};
}// End namespace Eigen
#endif
| 11,405 | 35.324841 | 145 | h |
| abess | abess-master/python/include/Eigen/src/SVD/BDCSVD.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
// research report written by Ming Gu and Stanley C. Eisenstat.
// The code variable names correspond to the names they used in their
// report
//
// Copyright (C) 2013 Gauthier Brun <[email protected]>
// Copyright (C) 2013 Nicolas Carre <[email protected]>
// Copyright (C) 2013 Jean Ceccato <[email protected]>
// Copyright (C) 2013 Pierre Zoppitelli <[email protected]>
// Copyright (C) 2013 Jitse Niesen <[email protected]>
// Copyright (C) 2014-2016 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BDCSVD_H
#define EIGEN_BDCSVD_H
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
// #define EIGEN_BDCSVD_SANITY_CHECKS
namespace Eigen {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
#endif
template<typename _MatrixType> class BDCSVD;
namespace internal {
template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
{
typedef _MatrixType MatrixType;
};
} // end namespace internal
/** \ingroup SVD_Module
*
*
* \class BDCSVD
*
* \brief class Bidiagonal Divide and Conquer SVD
*
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
*
* This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
* and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
 * You can control the switching size with the setSwitchSize() method; the default is 16.
 * For small matrices (<16), it is thus preferable to use JacobiSVD directly. For larger ones, BDCSVD is highly
 * recommended and can be several orders of magnitude faster.
*
* \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
 * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
* significantly degrade the accuracy.
*
* \sa class JacobiSVD
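 *
 * A minimal usage sketch (illustrative; solve() performs a least-squares solve and requires thin or full U and V):
 * \code
 * MatrixXd A = MatrixXd::Random(300, 200);
 * BDCSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
 * VectorXd x = svd.solve(VectorXd::Random(300)); // least-squares solution
 * \endcode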
*/
template<typename _MatrixType>
class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
{
typedef SVDBase<BDCSVD> Base;
public:
using Base::rows;
using Base::cols;
using Base::computeU;
using Base::computeV;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
typedef Matrix<RealScalar, Dynamic, 1> VectorType;
typedef Array<RealScalar, Dynamic, 1> ArrayXr;
typedef Array<Index,1,Dynamic> ArrayXi;
typedef Ref<ArrayXr> ArrayRef;
typedef Ref<ArrayXi> IndicesRef;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via BDCSVD::compute(const MatrixType&).
*/
BDCSVD() : m_algoswap(16), m_numIters(0)
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa BDCSVD()
*/
BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
   * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
   * available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
compute(matrix, computationOptions);
}
~BDCSVD()
{
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
   * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
   * available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
BDCSVD& compute(const MatrixType& matrix)
{
return compute(matrix, this->m_computationOptions);
}
void setSwitchSize(int s)
{
    eigen_assert(s>3 && "BDCSVD: the size of the algo switch has to be greater than 3");
m_algoswap = s;
}
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
void deflation43(Index firstCol, Index shift, Index i, Index size);
void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
protected:
MatrixXr m_naiveU, m_naiveV;
MatrixXr m_computed;
Index m_nRec;
ArrayXr m_workspace;
ArrayXi m_workspaceI;
int m_algoswap;
bool m_isTranspose, m_compU, m_compV;
using Base::m_singularValues;
using Base::m_diagSize;
using Base::m_computeFullU;
using Base::m_computeFullV;
using Base::m_computeThinU;
using Base::m_computeThinV;
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_isInitialized;
using Base::m_nonzeroSingularValues;
public:
int m_numIters;
}; //end class BDCSVD
// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
if (Base::allocate(rows, cols, computationOptions))
return;
m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
m_compU = computeV();
m_compV = computeU();
if (m_isTranspose)
std::swap(m_compU, m_compV);
if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
m_workspaceI.resize(3*m_diagSize);
}// end allocate
template<typename MatrixType>
BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "\n\n\n======================================================================================================================\n\n\n";
#endif
allocate(matrix.rows(), matrix.cols(), computationOptions);
using std::abs;
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
  //**** step -1 - If the problem is too small, directly fall back to JacobiSVD and return
if(matrix.cols() < m_algoswap)
{
// FIXME this line involves temporaries
JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
if(computeU()) m_matrixU = jsvd.matrixU();
if(computeV()) m_matrixV = jsvd.matrixV();
m_singularValues = jsvd.singularValues();
m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
m_isInitialized = true;
return *this;
}
//**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().maxCoeff();
if(scale==Literal(0)) scale = Literal(1);
MatrixX copy;
if (m_isTranspose) copy = matrix.adjoint()/scale;
else copy = matrix/scale;
//**** step 1 - Bidiagonalization
// FIXME this line involves temporaries
internal::UpperBidiagonalization<MatrixX> bid(copy);
//**** step 2 - Divide & Conquer
m_naiveU.setZero();
m_naiveV.setZero();
// FIXME this line involves a temporary matrix
m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
m_computed.template bottomRows<1>().setZero();
divide(0, m_diagSize - 1, 0, 0, 0);
//**** step 3 - Copy singular values and vectors
for (int i=0; i<m_diagSize; i++)
{
RealScalar a = abs(m_computed.coeff(i, i));
m_singularValues.coeffRef(i) = a * scale;
if (a<considerZero)
{
m_nonzeroSingularValues = i;
m_singularValues.tail(m_diagSize - i - 1).setZero();
break;
}
else if (i == m_diagSize - 1)
{
m_nonzeroSingularValues = i + 1;
break;
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
// std::cout << "m_naiveU\n" << m_naiveU << "\n\n";
// std::cout << "m_naiveV\n" << m_naiveV << "\n\n";
#endif
if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
else copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
m_isInitialized = true;
return *this;
}// end compute
template<typename MatrixType>
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
{
// Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
if (computeU())
{
Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
}
if (computeV())
{
Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
}
}
/** \internal
* Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
* A = [A1]
* [A2]
 * such that A1.rows()==n1; we then assume that at least half of the columns of A1 and A2 are zero.
 * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
* enough.
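 *
 * Illustrative example: with n1 = 2 and
 *   A = [ a 0 b 0 ]
 *       [ c 0 d 0 ]
 *       [ 0 e 0 f ]
 *       [ 0 g 0 h ]
 * only columns {0,2} of A1 and columns {1,3} of A2 are non-zero, so A1*B reduces to
 * A1(:,{0,2}) * B({0,2},:) and A2*B to A2(:,{1,3}) * B({1,3},:), roughly halving the work.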
*/
template<typename MatrixType>
void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
{
Index n = A.rows();
if(n>100)
{
// If the matrices are large enough, let's exploit the sparse structure of A by
// splitting it in half (wrt n1), and packing the non-zero columns.
Index n2 = n - n1;
Map<MatrixXr> A1(m_workspace.data() , n1, n);
Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
Map<MatrixXr> B1(m_workspace.data()+ n*n, n, n);
Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
Index k1=0, k2=0;
for(Index j=0; j<n; ++j)
{
if( (A.col(j).head(n1).array()!=Literal(0)).any() )
{
A1.col(k1) = A.col(j).head(n1);
B1.row(k1) = B.row(j);
++k1;
}
if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
{
A2.col(k2) = A.col(j).tail(n2);
B2.row(k2) = B.row(j);
++k2;
}
}
A.topRows(n1).noalias() = A1.leftCols(k1) * B1.topRows(k1);
A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
}
else
{
Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
tmp.noalias() = A*B;
A = tmp;
}
}
// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the
// place of the submatrix we are currently working on.
//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
// lastCol + 1 - firstCol is the size of the submatrix.
//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
//@param firstColW : Same as firstRowW but for columns.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
//                to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
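// Illustrative recursion sketch (not part of the implementation): for a block of size n with k = n/2,
// divide() first recurses on the right half [firstCol+k+1, lastCol], then on the left half
// [firstCol, firstCol+k-1], and merges the two sub-SVDs through the rank-one update whose SVD is
// computed by computeSVDofM().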
template<typename MatrixType>
void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
{
// requires rows = cols + 1;
using std::pow;
using std::sqrt;
using std::abs;
const Index n = lastCol - firstCol + 1;
const Index k = n/2;
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
RealScalar alphaK;
RealScalar betaK;
RealScalar r0;
RealScalar lambda, phi, c0, s0;
VectorType l, f;
// We use the other algorithm which is more efficient for small
// matrices.
if (n < m_algoswap)
{
// FIXME this line involves temporaries
JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
if (m_compU)
m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
else
{
m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
}
if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
return;
}
// We use the divide and conquer algorithm
alphaK = m_computed(firstCol + k, firstCol + k);
betaK = m_computed(firstCol + k + 1, firstCol + k);
  // The divides must be done in that order to get correct results: divide() changes the data inside the submatrices,
  // and the divide of the right submatrix reads one column of the left submatrix. That's why we need to treat the
  // right submatrix before the left one.
divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
if (m_compU)
{
lambda = m_naiveU(firstCol + k, firstCol + k);
phi = m_naiveU(firstCol + k + 1, lastCol + 1);
}
else
{
lambda = m_naiveU(1, firstCol + k);
phi = m_naiveU(0, lastCol + 1);
}
r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
if (m_compU)
{
l = m_naiveU.row(firstCol + k).segment(firstCol, k);
f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
}
else
{
l = m_naiveU.row(1).segment(firstCol, k);
f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
}
if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
if (r0<considerZero)
{
c0 = Literal(1);
s0 = Literal(0);
}
else
{
c0 = alphaK * lambda / r0;
s0 = betaK * phi / r0;
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
if (m_compU)
{
MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
    // we shift Q1 to the right
for (Index i = firstCol + k - 1; i >= firstCol; i--)
m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
    // we shift q1 to the left, scaled by a factor c0
m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
// last column = q1 * - s0
m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
// first column = q2 * s0
m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
// q2 *= c0
m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
}
else
{
RealScalar q1 = m_naiveU(0, firstCol + k);
// we shift Q1 to the right
for (Index i = firstCol + k - 1; i >= firstCol; i--)
m_naiveU(0, i + 1) = m_naiveU(0, i);
    // we shift q1 to the left, scaled by a factor c0
m_naiveU(0, firstCol) = (q1 * c0);
// last column = q1 * - s0
m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
// first column = q2 * s0
m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
// q2 *= c0
m_naiveU(1, lastCol + 1) *= c0;
m_naiveU.row(1).segment(firstCol + 1, k).setZero();
m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
m_computed(firstCol + shift, firstCol + shift) = r0;
m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
#endif
// Second part: try to deflate singular values in combined matrix
deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
static int count = 0;
std::cout << "# " << ++count << "\n\n";
assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
// assert(count<681);
// assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
#endif
// Third part: compute SVD of combined matrix
MatrixXr UofSVD, VofSVD;
VectorType singVals;
computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(UofSVD.allFinite());
assert(VofSVD.allFinite());
#endif
if (m_compU)
structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
else
{
Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
m_naiveU.middleCols(firstCol, n + 1) = tmp;
}
if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
}// end divide
// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
// the first column and on the diagonal and has undergone deflation, so the diagonal is in increasing
// order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
//
// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
ArrayRef diag = m_workspace.head(n);
diag(0) = Literal(0);
// Allocate space for singular values and vectors
singVals.resize(n);
U.resize(n+1, n+1);
if (m_compV) V.resize(n, n);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if (col0.hasNaN() || diag.hasNaN())
std::cout << "\n\nHAS NAN\n\n";
#endif
  // Many singular values might have been deflated: the zero ones have been moved to the end,
  // but others are interleaved and we must ignore them at this stage.
  // To this end, let's compute a permutation skipping them:
Index actual_n = n;
while(actual_n>1 && diag(actual_n-1)==Literal(0)) --actual_n;
Index m = 0; // size of the deflated problem
for(Index k=0;k<actual_n;++k)
if(abs(col0(k))>considerZero)
m_workspaceI(m++) = k;
Map<ArrayXi> perm(m_workspaceI.data(),m);
Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
Map<ArrayXr> mus(m_workspace.data()+2*n, n);
Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "computeSVDofM using:\n";
std::cout << " z: " << col0.transpose() << "\n";
std::cout << " d: " << diag.transpose() << "\n";
#endif
// Compute singVals, shifts, and mus
computeSingVals(col0, diag, perm, singVals, shifts, mus);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
std::cout << " sing-val: " << singVals.transpose() << "\n";
std::cout << " mu: " << mus.transpose() << "\n";
std::cout << " shift: " << shifts.transpose() << "\n";
{
Index actual_n = n;
while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;
std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
std::cout << " check3 (>0) : " << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << "\n\n\n";
std::cout << " check4 (>0) : " << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << "\n\n\n";
}
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(singVals.allFinite());
assert(mus.allFinite());
assert(shifts.allFinite());
#endif
// Compute zhat
perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << " zhat: " << zhat.transpose() << "\n";
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(zhat.allFinite());
#endif
computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(U.allFinite());
assert(V.allFinite());
assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);
assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
  // Because of deflation, the singular values might not be completely sorted.
  // Fortunately, reordering them is an O(n) problem.
for(Index i=0; i<actual_n-1; ++i)
{
if(singVals(i)>singVals(i+1))
{
using std::swap;
swap(singVals(i),singVals(i+1));
U.col(i).swap(U.col(i+1));
if(m_compV) V.col(i).swap(V.col(i+1));
}
}
  // Reverse the order so that the singular values are in decreasing order.
  // Because of deflation, the zero singular values are already at the end.
singVals.head(actual_n).reverseInPlace();
U.leftCols(actual_n).rowwise().reverseInPlace();
if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n";
std::cout << " * sing-val: " << singVals.transpose() << "\n";
// std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
#endif
}
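// The function below evaluates the secular equation (illustrative transcription):
//   f(mu) = 1 + sum_{i in perm} col0(i)^2 / ((diagShifted(i) - mu) * (diag(i) + shift + mu)),
// whose roots are the singular values of the combined matrix; diagShifted = diag - shift is
// passed explicitly to avoid cancellation when mu is close to a diagonal entry.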
template <typename MatrixType>
typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
{
Index m = perm.size();
RealScalar res = Literal(1);
for(Index i=0; i<m; ++i)
{
Index j = perm(i);
res += numext::abs2(col0(j)) / ((diagShifted(j) - mu) * (diag(j) + shift + mu));
}
return res;
}
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
VectorType& singVals, ArrayRef shifts, ArrayRef mus)
{
using std::abs;
using std::swap;
Index n = col0.size();
Index actual_n = n;
while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
for (Index k = 0; k < n; ++k)
{
if (col0(k) == Literal(0) || actual_n==1)
{
// if col0(k) == 0, then entry is deflated, so singular value is on diagonal
// if actual_n==1, then the deflated problem is already diagonalized
singVals(k) = k==0 ? col0(0) : diag(k);
mus(k) = Literal(0);
shifts(k) = k==0 ? col0(0) : diag(k);
continue;
}
// otherwise, use secular equation to find singular value
RealScalar left = diag(k);
RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
if(k==actual_n-1)
right = (diag(actual_n-1) + col0.matrix().norm());
else
{
// Skip deflated singular values
Index l = k+1;
while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
right = diag(l);
}
// first decide whether it's closer to the left end or the right end
RealScalar mid = left + (right-left) / Literal(2);
RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << right-left << "\n";
std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, diag-left, left) << " " << secularEq(mid-right, col0, diag, perm, diag-right, right) << "\n";
std::cout << " = " << secularEq(0.1*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.2*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.3*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.4*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.49*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.5*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.51*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.6*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.7*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.8*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.9*(left+right), col0, diag, perm, diag, 0) << "\n";
#endif
RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
// measure everything relative to shift
Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
diagShifted = diag - shift;
// initial guess
RealScalar muPrev, muCur;
if (shift == left)
{
muPrev = (right - left) * RealScalar(0.1);
if (k == actual_n-1) muCur = right - left;
else muCur = (right - left) * RealScalar(0.5);
}
else
{
muPrev = -(right - left) * RealScalar(0.1);
muCur = -(right - left) * RealScalar(0.5);
}
RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
if (abs(fPrev) < abs(fCur))
{
swap(fPrev, fCur);
swap(muPrev, muCur);
}
// rational interpolation: fit a function of the form a / mu + b through the two previous
// iterates and use its zero to compute the next iterate
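    // With f(mu) = a/mu + b matching the two samples (muPrev, fPrev) and (muCur, fCur), one gets
    //   a = (fCur - fPrev) / (1/muCur - 1/muPrev),  b = fCur - a/muCur,
    // and the zero of f at mu = -a/b, which is exactly what the loop below computes.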
bool useBisection = fPrev*fCur>Literal(0);
while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
{
++m_numIters;
// Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
RealScalar b = fCur - a / muCur;
// And find mu such that f(mu)==0:
RealScalar muZero = -a/b;
RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
muPrev = muCur;
fPrev = fCur;
muCur = muZero;
fCur = fZero;
if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
if (abs(fCur)>abs(fPrev)) useBisection = true;
}
// fall back on bisection method if rational interpolation did not work
if (useBisection)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
#endif
RealScalar leftShifted, rightShifted;
if (shift == left)
{
leftShifted = (std::numeric_limits<RealScalar>::min)();
// I don't understand why the case k==0 would be special there:
// if (k == 0) rightShifted = right - left; else
rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.6)); // theoretically we can take 0.5, but let's be safe
}
else
{
leftShifted = -(right - left) * RealScalar(0.6);
rightShifted = -(std::numeric_limits<RealScalar>::min)();
}
RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE
RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(!(fLeft * fRight<0))
{
std::cout << "fLeft: " << leftShifted << " - " << diagShifted.head(10).transpose() << "\n ; " << bool(left==shift) << " " << (left-shift) << "\n";
std::cout << k << " : " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << left << " - " << right << " -> " << leftShifted << " " << rightShifted << " shift=" << shift << "\n";
}
#endif
eigen_internal_assert(fLeft * fRight < Literal(0));
while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
{
RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
if (fLeft * fMid < Literal(0))
{
rightShifted = midShifted;
}
else
{
leftShifted = midShifted;
fLeft = fMid;
}
}
muCur = (leftShifted + rightShifted) / Literal(2);
}
singVals[k] = shift + muCur;
shifts[k] = shift;
mus[k] = muCur;
// perturb singular value slightly if it equals diagonal entry to avoid division by zero later
    // (deflation is supposed to prevent this from happening)
    // - this does not seem to be necessary anymore -
// if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
// if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
}
}
// zhat is a perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
template <typename MatrixType>
void BDCSVD<MatrixType>::perturbCol0
(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
{
using std::sqrt;
Index n = col0.size();
Index m = perm.size();
if(m==0)
{
zhat.setZero();
return;
}
Index last = perm(m-1);
  // The offset permits skipping deflated entries while computing zhat
for (Index k = 0; k < n; ++k)
{
if (col0(k) == Literal(0)) // deflated
zhat(k) = Literal(0);
else
{
// see equation (3.6)
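      // Illustrative transcription of equation (3.6) of the Gu--Eisenstat report:
      //   |zhat_k|^2 = (sigma_last^2 - d_k^2) * prod_{i != k} (sigma_i^2 - d_k^2) / (d_i^2 - d_k^2),
      // where each factor (sigma^2 - d_k^2) is evaluated as (sigma + d_k) * (mu + (shift - d_k))
      // to avoid catastrophic cancellation (sigma = shift + mu).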
RealScalar dk = diag(k);
RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));
for(Index l = 0; l<m; ++l)
{
Index i = perm(l);
if(i!=k)
{
Index j = i<k ? i : perm(l-1);
prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
<< ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
#endif
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n";
#endif
RealScalar tmp = sqrt(prod);
zhat(k) = col0(k) > Literal(0) ? tmp : -tmp;
}
}
}
// compute singular vectors
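// Illustrative formulas behind the loops below: for each non-deflated index i,
//   U(i,k) = zhat(i) / ((d_i - sigma_k) * (d_i + sigma_k)),
//   V(i,k) = d_i * zhat(i) / ((d_i - sigma_k) * (d_i + sigma_k)),
// where (d_i - sigma_k) is evaluated as ((d_i - shift_k) - mu_k) for numerical stability;
// each column is then normalized.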
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVecs
(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
{
Index n = zhat.size();
Index m = perm.size();
for (Index k = 0; k < n; ++k)
{
if (zhat(k) == Literal(0))
{
U.col(k) = VectorType::Unit(n+1, k);
if (m_compV) V.col(k) = VectorType::Unit(n, k);
}
else
{
U.col(k).setZero();
for(Index l=0;l<m;++l)
{
Index i = perm(l);
        U(i,k) = zhat(i) / ((diag(i) - shifts(k)) - mus(k)) / (diag(i) + singVals[k]);
}
U(n,k) = Literal(0);
U.col(k).normalize();
if (m_compV)
{
V.col(k).setZero();
for(Index l=1;l<m;++l)
{
Index i = perm(l);
          V(i,k) = diag(i) * zhat(i) / ((diag(i) - shifts(k)) - mus(k)) / (diag(i) + singVals[k]);
}
V(0,k) = Literal(-1);
V.col(k).normalize();
}
}
}
U.col(n) = VectorType::Unit(n+1, n);
}
// pages 12-13
// i >= 1, d_i almost zero and z_i non-zero.
// We use a rotation, applied to the left of M, to zero out z_i.
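// Concretely (illustrative), with c = m_computed(start, start), s = m_computed(start+i, start) and
// r = sqrt(c^2 + s^2), the rotation maps the pair (c, s) to (r, 0), and the same rotation is
// accumulated into the corresponding columns of m_naiveU below.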
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)
{
using std::abs;
using std::sqrt;
using std::pow;
Index start = firstCol + shift;
RealScalar c = m_computed(start, start);
RealScalar s = m_computed(start+i, start);
RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
if (r == Literal(0))
{
m_computed(start+i, start+i) = Literal(0);
return;
}
m_computed(start,start) = r;
m_computed(start+i, start) = Literal(0);
m_computed(start+i, start+i) = Literal(0);
JacobiRotation<RealScalar> J(c/r,-s/r);
if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
}// end deflation 43
// page 13
// i,j >= 1, i != j and |d_i - d_j| < epsilon * norm2(M)
// We apply two rotations to get z_j = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
{
using std::abs;
using std::sqrt;
using std::conj;
using std::pow;
RealScalar c = m_computed(firstColm+i, firstColm);
RealScalar s = m_computed(firstColm+j, firstColm);
RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
<< m_computed(firstColm + i-1, firstColm) << " "
<< m_computed(firstColm + i, firstColm) << " "
<< m_computed(firstColm + i+1, firstColm) << " "
<< m_computed(firstColm + i+2, firstColm) << "\n";
std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " "
<< m_computed(firstColm + i, firstColm+i) << " "
<< m_computed(firstColm + i+1, firstColm+i+1) << " "
<< m_computed(firstColm + i+2, firstColm+i+2) << "\n";
#endif
if (r==Literal(0))
{
m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
return;
}
c/=r;
s/=r;
m_computed(firstColm + i, firstColm) = r;
m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
m_computed(firstColm + j, firstColm) = Literal(0);
JacobiRotation<RealScalar> J(c,-s);
if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
}// end deflation 44
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)
{
using std::sqrt;
using std::abs;
const Index length = lastCol + 1 - firstCol;
Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
Diagonal<MatrixXr> fulldiag(m_computed);
VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n";
#endif
//condition 4.1
if (diag(0) < epsilon_coarse)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
#endif
diag(0) = epsilon_coarse;
}
//condition 4.2
for (Index i=1;i<length;++i)
if (abs(col0(i)) < epsilon_strict)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << " (diag(" << i << ")=" << diag(i) << ")\n";
#endif
col0(i) = Literal(0);
}
//condition 4.3
for (Index i=1;i<length; i++)
if (diag(i) < epsilon_coarse)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
#endif
deflation43(firstCol, shift, i, length);
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "to be sorted: " << diag.transpose() << "\n\n";
#endif
{
// Check for total deflation
// If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting
bool total_deflation = (col0.tail(length-1).array()<considerZero).all();
    // Sort the diagonal entries: since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
// First, compute the respective permutation.
Index *permutation = m_workspaceI.data();
{
permutation[0] = 0;
Index p = 1;
      // Move deflated diagonal entries to the end.
for(Index i=1; i<length; ++i)
if(abs(diag(i))<considerZero)
permutation[p++] = i;
Index i=1, j=k+1;
for( ; p < length; ++p)
{
if (i > k) permutation[p] = j++;
else if (j >= length) permutation[p] = i++;
else if (diag(i) < diag(j)) permutation[p] = j++;
else permutation[p] = i++;
}
}
// If we have a total deflation, then we have to insert diag(0) at the right place
if(total_deflation)
{
for(Index i=1; i<length; ++i)
{
Index pi = permutation[i];
if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
permutation[i-1] = permutation[i];
else
{
permutation[i-1] = 0;
break;
}
}
}
// Current index of each col, and current column of each index
Index *realInd = m_workspaceI.data()+length;
Index *realCol = m_workspaceI.data()+2*length;
for(int pos = 0; pos< length; pos++)
{
realCol[pos] = pos;
realInd[pos] = pos;
}
for(Index i = total_deflation?0:1; i < length; i++)
{
const Index pi = permutation[length - (total_deflation ? i+1 : i)];
const Index J = realCol[pi];
using std::swap;
// swap diagonal and first column entries:
swap(diag(i), diag(J));
if(i!=0 && J!=0) swap(col0(i), col0(J));
// change columns
if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
else m_naiveU.col(firstCol+i).segment(0, 2) .swap(m_naiveU.col(firstCol+J).segment(0, 2));
if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));
      // update real positions
const Index realI = realInd[i];
realCol[realI] = J;
realCol[pi] = i;
realInd[J] = realI;
realInd[i] = pi;
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
std::cout << " : " << col0.transpose() << "\n\n";
#endif
//condition 4.4
{
Index i = length-1;
while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
for(; i>1;--i)
if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.4 with i = " << i << " because " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*diag(i) << "\n";
#endif
eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
}
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
for(Index j=2;j<length;++j)
assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
}//end deflation
#ifndef __CUDACC__
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
*
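  * A minimal usage sketch (illustrative):
  * \code
  * MatrixXd A = MatrixXd::Random(4, 3);
  * BDCSVD<MatrixXd> svd = A.bdcSvd(ComputeThinU | ComputeThinV);
  * \endcode
  *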
* \sa class BDCSVD
*/
template<typename Derived>
BDCSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
{
return BDCSVD<PlainObject>(*this, computationOptions);
}
#endif
} // end namespace Eigen
#endif
| 47,664 | 37.689123 | 222 |
h
|
abess
|
abess-master/python/include/Eigen/src/SVD/JacobiSVD.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_JACOBISVD_H
#define EIGEN_JACOBISVD_H
namespace Eigen {
namespace internal {
// forward declaration (needed by ICC)
// the empty body is required by MSVC
template<typename MatrixType, int QRPreconditioner,
bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct svd_precondition_2x2_block_to_be_real {};
/*** QR preconditioners (R-SVD)
***
*** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
*** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
*** JacobiSVD which by itself is only able to work on square matrices.
***/
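// In a nutshell (illustrative): for a tall matrix A (rows >= cols), factor A = Q * R, compute the
// SVD of the small square factor R = U * S * V^*, and recover A = (Q * U) * S * V^*; the
// preconditioners below produce the square work matrix handed to the Jacobi iterations.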
enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
template<typename MatrixType, int QRPreconditioner, int Case>
struct qr_preconditioner_should_do_anything
{
enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
b = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
ret = !( (QRPreconditioner == NoQRPreconditioner) ||
(Case == PreconditionIfMoreColsThanRows && bool(a)) ||
(Case == PreconditionIfMoreRowsThanCols && bool(b)) )
};
};
template<typename MatrixType, int QRPreconditioner, int Case,
bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
> struct qr_preconditioner_impl {};
template<typename MatrixType, int QRPreconditioner, int Case>
class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
{
public:
void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
{
return false;
}
};
/*** preconditioner using FullPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
};
typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef FullPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
: ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
: MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using ColPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef ColPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
: ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
: MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using HouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
return true;
}
return false;
}
private:
typedef HouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
Options = MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
return true;
}
else return false;
}
private:
typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** 2x2 SVD implementation
***
 *** JacobiSVD consists of performing a series of 2x2 SVD subproblems
***/
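// For complex matrices, each 2x2 block must first be made real (illustrative): rows and columns are
// scaled by unit-modulus factors z = |a|/a so that a real Jacobi rotation can then be applied;
// this is what svd_precondition_2x2_block_to_be_real::run() below implements.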
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
{
using std::sqrt;
using std::abs;
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = NumTraits<Scalar>::epsilon();
if(n==0)
{
// make sure first column is zero
work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
// work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
// otherwise the second row is already zero, so we have nothing to do.
}
else
{
rot.c() = conj(work_matrix.coeff(p,p)) / n;
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
}
// update largest diagonal entry
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
// and check whether the 2x2 block is already diagonal
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
}
};
template<typename _MatrixType, int QRPreconditioner>
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
{
typedef _MatrixType MatrixType;
};
} // end namespace internal
/** \ingroup SVD_Module
*
*
* \class JacobiSVD
*
* \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
* \tparam QRPreconditioner this optional parameter allows one to specify the type of QR decomposition that will be used internally
* for the R-SVD step for non-square matrices. See discussion of possible values below.
*
* The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
* This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their first \a m columns to be formed. So \a U is then an n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* Here's an example demonstrating basic usage:
* \include JacobiSVD_basic.cpp
* Output: \verbinclude JacobiSVD_basic.out
*
* This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
* bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
* \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
* In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
*
* The possible values for QRPreconditioner are:
* \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
* \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
* Contrary to other QRs, it doesn't allow computing thin unitaries.
* \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
* This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
* is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
* process is more reliable than the optimized bidiagonal SVD iterations.
* \li NoQRPreconditioner disables the QR preconditioner entirely. This is useful if you know that you will only be computing
* JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
* faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
* if QR preconditioning is needed before applying it anyway.
*
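* As a minimal sketch, a thin SVD can be used directly for least-squares solving
* (hypothetical \c A and \c b with compatible sizes):
* \code
* MatrixXf A = MatrixXf::Random(30, 3);
* VectorXf b = VectorXf::Random(30);
* JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
* VectorXf x = svd.solve(b); // minimizes |A*x - b|
* \endcode
*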
* \sa MatrixBase::jacobiSvd()
*/
template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
: public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
{
typedef SVDBase<JacobiSVD> Base;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef typename internal::plain_row_type<MatrixType>::type RowType;
typedef typename internal::plain_col_type<MatrixType>::type ColType;
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
WorkMatrixType;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via JacobiSVD::compute(const MatrixType&).
*/
JacobiSVD()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa JacobiSVD()
*/
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
{
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
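*
* For instance, requesting both thin unitaries at construction (with a hypothetical MatrixXd \c m):
* \code
* JacobiSVD<MatrixXd> svd(m, ComputeThinU | ComputeThinV);
* \endcode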
*/
explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
{
compute(matrix, computationOptions);
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
JacobiSVD& compute(const MatrixType& matrix)
{
return compute(matrix, m_computationOptions);
}
using Base::computeU;
using Base::computeV;
using Base::rows;
using Base::cols;
using Base::rank;
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
protected:
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_singularValues;
using Base::m_isInitialized;
using Base::m_isAllocated;
using Base::m_usePrescribedThreshold;
using Base::m_computeFullU;
using Base::m_computeThinU;
using Base::m_computeFullV;
using Base::m_computeThinV;
using Base::m_computationOptions;
using Base::m_nonzeroSingularValues;
using Base::m_rows;
using Base::m_cols;
using Base::m_diagSize;
using Base::m_prescribedThreshold;
WorkMatrixType m_workMatrix;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
friend struct internal::qr_preconditioner_impl;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
MatrixType m_scaledMatrix;
};
template<typename MatrixType, int QRPreconditioner>
void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return;
}
m_rows = rows;
m_cols = cols;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
{
eigen_assert(!(m_computeThinU || m_computeThinV) &&
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize
: 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols
: m_computeThinV ? m_diagSize
: 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
}
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
// only worsening the precision of U and V as we accumulate more rotations
const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
// limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
// Scaling factor to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
if(m_rows!=m_cols)
{
m_scaledMatrix = matrix / scale;
m_qr_precond_morecols.run(*this, m_scaledMatrix);
m_qr_precond_morerows.run(*this, m_scaledMatrix);
}
else
{
m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
}
/*** step 2. The main Jacobi SVD iteration. ***/
RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();
bool finished = false;
while(!finished)
{
finished = true;
// do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
for(Index p = 1; p < m_diagSize; ++p)
{
for(Index q = 0; q < p; ++q)
{
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
// the complex to real operation returns true if the updated 2x2 block is not already diagonal
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
{
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
// accumulate resulting Jacobi rotations
m_workMatrix.applyOnTheLeft(p,q,j_left);
if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
m_workMatrix.applyOnTheRight(p,q,j_right);
if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
// keep track of the largest diagonal coefficient
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
}
}
}
}
}
/*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
for(Index i = 0; i < m_diagSize; ++i)
{
// For a complex matrix, some diagonal coefficients might not have been
// treated by svd_precondition_2x2_block_to_be_real, and the imaginary part
// of some diagonal entry might not be zero.
if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)
{
RealScalar a = abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
else
{
// m_workMatrix.coeff(i,i) is already real, no difficulty:
RealScalar a = numext::real(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);
}
}
m_singularValues *= scale;
/*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
m_nonzeroSingularValues = m_diagSize;
for(Index i = 0; i < m_diagSize; i++)
{
Index pos;
RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
if(maxRemainingSingularValue == RealScalar(0))
{
m_nonzeroSingularValues = i;
break;
}
if(pos)
{
pos += i;
std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
m_isInitialized = true;
return *this;
}
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by two-sided
* Jacobi transformations.
*
* \sa class JacobiSVD
*/
template<typename Derived>
JacobiSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
{
return JacobiSVD<PlainObject>(*this, computationOptions);
}
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_H
// abess-master/python/include/Eigen/src/SVD/JacobiSVD_LAPACKE.h
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Singular Value Decomposition - SVD.
********************************************************************************
*/
#ifndef EIGEN_JACOBISVD_LAPACKE_H
#define EIGEN_JACOBISVD_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
/*typedef MatrixType::Scalar Scalar;*/ \
/*typedef MatrixType::RealScalar RealScalar;*/ \
allocate(matrix.rows(), matrix.cols(), computationOptions); \
\
/*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
m_nonzeroSingularValues = m_diagSize; \
\
lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobu, jobvt; \
LAPACKE_TYPE *u, *vt, dummy; \
jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
if (computeU()) { \
ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
localV.resize(ldvt, m_cols); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
MatrixType m_temp; m_temp = matrix; \
LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
if (computeV()) m_matrixV = localV.adjoint(); \
/* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
m_isInitialized = true; \
return *this; \
}
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_LAPACKE_H
// abess-master/python/include/Eigen/src/SVD/SVDBase.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// Copyright (C) 2013 Gauthier Brun <[email protected]>
// Copyright (C) 2013 Nicolas Carre <[email protected]>
// Copyright (C) 2013 Jean Ceccato <[email protected]>
// Copyright (C) 2013 Pierre Zoppitelli <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SVDBASE_H
#define EIGEN_SVDBASE_H
namespace Eigen {
/** \ingroup SVD_Module
*
*
* \class SVDBase
*
* \brief Base class of SVD algorithms
*
* \tparam Derived the type of the actual SVD decomposition
*
* The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their first \a m columns to be formed. So \a U is then an n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
* \sa class BDCSVD, class JacobiSVD
*/
template<typename Derived>
class SVDBase
{
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns the \a U matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink.
*
* The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a U to be computed.
*/
const MatrixUType& matrixU() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
return m_matrixU;
}
/** \returns the \a V matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink.
*
* The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a V to be computed.
*/
const MatrixVType& matrixV() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
return m_matrixV;
}
/** \returns the vector of singular values.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
* returned vector has size \a m. Singular values are always sorted in decreasing order.
*/
const SingularValuesType& singularValues() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_singularValues;
}
/** \returns the number of singular values that are not exactly 0 */
Index nonzeroSingularValues() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_nonzeroSingularValues;
}
/** \returns the rank of the matrix of which \c *this is the SVD.
*
* \note This method has to determine which singular values should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "SVD is not initialized.");
if(m_singularValues.size()==0) return 0;
RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
Index i = m_nonzeroSingularValues-1;
while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
return i+1;
}
/** Allows one to prescribe a threshold to be used by certain methods, such as rank() and solve(),
* which need to determine when singular values are to be considered nonzero.
* This is not used for the SVD decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold().
* The default is \c NumTraits<Scalar>::epsilon()
*
* \param threshold The new value to use as the threshold.
*
* A singular value will be considered nonzero if
* \f$ \vert singular value \vert > threshold \times \vert max singular value \vert \f$.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
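*
* A minimal sketch (hypothetical decomposition \c svd, illustrative value):
* \code
* svd.setThreshold(1e-8);
* Index r = svd.rank(); // singular values below 1e-8 times the largest one count as zero
* \endcode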
*/
Derived& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return derived();
}
/** Allows one to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code svd.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
Derived& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return derived();
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
: (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
}
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
inline bool computeU() const { return m_computeFullU || m_computeThinU; }
/** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
inline bool computeV() const { return m_computeFullV || m_computeThinV; }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
*
* \note Solving requires both U and V to be computed. Thin U and V are enough; there is no need for full U or V.
*
* \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
* In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
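*
* A minimal sketch with several right-hand sides (hypothetical \c A and \c B):
* \code
* JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
* MatrixXd X = svd.solve(B); // column j of X minimizes |A*x - B.col(j)|
* \endcode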
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
return Solve<Derived, Rhs>(derived(), b.derived());
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
RealScalar m_prescribedThreshold;
/** \brief Default Constructor.
*
* Default constructor of SVDBase
*/
SVDBase()
: m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{
check_template_parameters();
}
};
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
eigen_assert(rhs.rows() == rows());
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
Matrix<Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixV.leftCols(l_rank) * tmp;
}
#endif
template<typename MatrixType>
bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return true;
}
m_rows = rows;
m_cols = cols;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
return false;
}
}// end namespace
#endif // EIGEN_SVDBASE_H
// abess-master/python/include/Eigen/src/SVD/UpperBidiagonalization.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BIDIAGONALIZATION_H
#define EIGEN_BIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class; we don't want to make it a stable API.
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
template<typename _MatrixType> class UpperBidiagonalization
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
typedef HouseholderSequence<
const MatrixType,
const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
> HouseholderUSequenceType;
typedef HouseholderSequence<
const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
Diagonal<const MatrixType,1>,
OnTheRight
> HouseholderVSequenceType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via Bidiagonalization::compute(const MatrixType&).
*/
UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
explicit UpperBidiagonalization(const MatrixType& matrix)
: m_householder(matrix.rows(), matrix.cols()),
m_bidiagonal(matrix.cols(), matrix.cols()),
m_isInitialized(false)
{
compute(matrix);
}
UpperBidiagonalization& compute(const MatrixType& matrix);
UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
const MatrixType& householder() const { return m_householder; }
const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
const HouseholderUSequenceType householderU() const
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
}
const HouseholderVSequenceType householderV() // const here gives nasty errors and I'm lazy
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
.setLength(m_householder.cols()-1)
.setShift(1);
}
protected:
MatrixType m_householder;
BidiagonalType m_bidiagonal;
bool m_isInitialized;
};
// Standard upper bidiagonalization without fancy optimizations
// This version should be faster for small matrix size
template<typename MatrixType>
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
typename MatrixType::Scalar* tempData = 0)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = mat.rows();
Index cols = mat.cols();
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(rows);
tempData = tempVector.data();
}
for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{
Index remainingRows = rows - k;
Index remainingCols = cols - k - 1;
// construct left householder transform in-place in A
mat.col(k).tail(remainingRows)
.makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
// apply householder transform to remaining part of A on the left
mat.bottomRightCorner(remainingRows, remainingCols)
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
if(k == cols-1) break;
// construct right householder transform in-place in mat
mat.row(k).tail(remainingCols)
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
// apply householder transform to remaining part of mat on the left
mat.bottomRightCorner(remainingRows-1, remainingCols)
.applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
}
}
/** \internal
* Helper routine for the block reduction to upper bidiagonal form.
*
* Let's partition the matrix A:
*
* | A00 A01 |
* A = | |
* | A10 A11 |
*
* This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
* and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
* is updated using matrix-matrix products:
*   A11 -= V * Y^* + X * U^*
* where V and U contain the left and right Householder vectors, stored in A10 and A01
* respectively, and the update matrices X and Y are computed during the reduction.
*
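* As a dense illustration of that trailing update (hypothetical matrices of
* compatible sizes; the code below performs it in-place on sub-blocks):
* \code
* A11.noalias() -= V * Y.adjoint(); // left Householder contribution
* A11.noalias() -= X * U.adjoint(); // right Householder contribution
* \endcode
*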
*/
template<typename MatrixType>
void upperbidiagonalization_blocked_helper(MatrixType& A,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
Index bs,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > X,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
Index brows = A.rows();
Index bcols = A.cols();
Scalar tau_u, tau_u_prev(0), tau_v;
for(Index k = 0; k < bs; ++k)
{
Index remainingRows = brows - k;
Index remainingCols = bcols - k - 1;
SubMatType X_k1( X.block(k,0, remainingRows,k) );
SubMatType V_k1( A.block(k,0, remainingRows,k) );
// 1 - update the k-th column of A
SubColumnType v_k = A.col(k).tail(remainingRows);
v_k -= V_k1 * Y.row(k).head(k).adjoint();
if(k) v_k -= X_k1 * A.col(k).head(k);
// 2 - construct left Householder transform in-place
v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
if(k+1<bcols)
{
SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
// this eases the application of Householder transformations
// A(k,k) will store tau_v later
A(k,k) = Scalar(1);
// 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
// let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
y_k.noalias() -= Y_k.leftCols(k) * tmp;
tmp.noalias() = X_k1.adjoint() * v_k;
y_k.noalias() -= U_k1.adjoint() * tmp;
y_k *= numext::conj(tau_v);
}
// 4 - update k-th row of A (it will become u_k)
SubRowType u_k( A.row(k).tail(remainingCols) );
u_k = u_k.conjugate();
{
u_k -= Y_k * A.row(k).head(k+1).adjoint();
if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
}
// 5 - construct right Householder transform in-place
u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
// this eases the application of Householder transformations
// A(k,k+1) will store tau_u later
A(k,k+1) = Scalar(1);
// 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
// let's use the beginning of column k of X as temporary vectors
// note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
tmp0.noalias() = U_k1 * u_k.transpose();
x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
x_k *= numext::conj(tau_u);
tau_u = numext::conj(tau_u);
u_k = u_k.conjugate();
}
if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
tau_u_prev = tau_u;
}
else
A.coeffRef(k-1,k) = tau_u_prev;
A.coeffRef(k,k) = tau_v;
}
if(bs<bcols)
A.coeffRef(bs-1,bs) = tau_u_prev;
// update the bottom-right block A11
if(bcols>bs && brows>bs)
{
SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
SubMatType A10( A.block(bs,0, brows-bs,bs) );
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
Scalar tmp = A01(bs-1,0);
A01(bs-1,0) = Literal(1);
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
A01(bs-1,0) = tmp;
}
}
/** \internal
*
* Implementation of a block-bidiagonal reduction.
* It is based on the following paper:
* The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
* by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
* section 3.3
*/
template<typename MatrixType, typename BidiagType>
void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
Index maxBlockSize=32,
typename MatrixType::Scalar* /*tempData*/ = 0)
{
typedef typename MatrixType::Scalar Scalar;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
Index rows = A.rows();
Index cols = A.cols();
Index size = (std::min)(rows, cols);
// X and Y are work space
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
Matrix<Scalar,
MatrixType::RowsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
Matrix<Scalar,
MatrixType::ColsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
for(k = 0; k < size; k += blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index brows = rows - k; // rows of the block
Index bcols = cols - k; // columns of the block
// partition the matrix A:
//
// | A00 A01 A02 |
// | |
// A = | A10 A11 A12 |
// | |
// | A20 A21 A22 |
//
// where A11 is a bs x bs diagonal block,
// and let:
// | A11 A12 |
// B = | |
// | A21 A22 |
BlockType B = A.block(k,k,brows,bcols);
// This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
// The algorithm then continues on the updated A22.
//
// However, if B is too small, or A22 empty, then let's use an unblocked strategy
if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
{
upperbidiagonalization_inplace_unblocked(B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
X.data()
);
break; // We're done
}
else
{
upperbidiagonalization_blocked_helper<BlockType>( B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
bs,
X.topLeftCorner(brows,bs),
Y.topLeftCorner(bcols,bs)
);
}
}
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
m_householder = matrix;
ColVectorType temp(rows);
upperbidiagonalization_inplace_unblocked(m_householder,
&(m_bidiagonal.template diagonal<0>().coeffRef(0)),
&(m_bidiagonal.template diagonal<1>().coeffRef(0)),
temp.data());
m_isInitialized = true;
return *this;
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
m_householder = matrix;
upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
m_isInitialized = true;
return *this;
}
#if 0
/** \return the Householder QR decomposition of \c *this.
*
* \sa class Bidiagonalization
*/
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
return UpperBidiagonalization<PlainObject>(eval());
}
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BIDIAGONALIZATION_H
// abess-master/python/include/Eigen/src/SparseCholesky/SimplicialCholesky.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
#define EIGEN_SIMPLICIAL_CHOLESKY_H
namespace Eigen {
enum SimplicialCholeskyMode {
SimplicialCholeskyLLT,
SimplicialCholeskyLDLT
};
namespace internal {
template<typename CholMatrixType, typename InputMatrixType>
struct simplicial_cholesky_grab_input {
typedef CholMatrixType const * ConstCholMatrixPtr;
static void run(const InputMatrixType& input, ConstCholMatrixPtr &pmat, CholMatrixType &tmp)
{
tmp = input;
pmat = &tmp;
}
};
template<typename MatrixType>
struct simplicial_cholesky_grab_input<MatrixType,MatrixType> {
typedef MatrixType const * ConstMatrixPtr;
static void run(const MatrixType& input, ConstMatrixPtr &pmat, MatrixType &/*tmp*/)
{
pmat = &input;
}
};
} // end namespace internal
/** \ingroup SparseCholesky_Module
* \brief A base class for direct sparse Cholesky factorizations
*
* This is a base class for LL^T and LDL^T Cholesky factorizations of sparse matrices that are
* selfadjoint and positive definite. These factorizations allow for solving A.X = B where
* X and B can be either dense or sparse.
*
* In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
* such that the factorized matrix is P A P^-1.
*
* \tparam Derived the type of the derived class, that is the actual factorization type.
*
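* A minimal usage sketch through one of the derived classes (hypothetical sparse,
* selfadjoint, positive definite matrix \c A and right-hand side \c b):
* \code
* SimplicialLDLT<SparseMatrix<double> > solver;
* solver.compute(A);
* if(solver.info() != Success) return; // decomposition failed
* VectorXd x = solver.solve(b);
* \endcode
*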
*/
template<typename Derived>
class SimplicialCholeskyBase : public SparseSolverBase<Derived>
{
typedef SparseSolverBase<Derived> Base;
using Base::m_isInitialized;
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::OrderingType OrderingType;
enum { UpLo = internal::traits<Derived>::UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef CholMatrixType const * ConstCholMatrixPtr;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
using Base::derived;
/** Default constructor */
SimplicialCholeskyBase()
: m_info(Success), m_shiftOffset(0), m_shiftScale(1)
{}
explicit SimplicialCholeskyBase(const MatrixType& matrix)
: m_info(Success), m_shiftOffset(0), m_shiftScale(1)
{
derived().compute(matrix);
}
~SimplicialCholeskyBase()
{
}
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
inline Index cols() const { return m_matrix.cols(); }
inline Index rows() const { return m_matrix.rows(); }
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears not to be positive definite.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
/** \returns the permutation P
* \sa permutationPinv() */
const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationP() const
{ return m_P; }
/** \returns the inverse P^-1 of the permutation P
* \sa permutationP() */
const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationPinv() const
{ return m_Pinv; }
/** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization.
*
* During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n
* \c d_ii = \a offset + \a scale * \c d_ii
*
* The default is the identity transformation with \a offset=0, and \a scale=1.
*
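* For instance, a small positive \a offset can act as a diagonal regularization when
* the input is barely positive definite (illustrative value, hypothetical matrix \c A):
* \code
* SimplicialLDLT<SparseMatrix<double> > ldlt;
* ldlt.setShift(1e-8); // factorize with d_ii = 1e-8 + d_ii
* ldlt.compute(A);
* \endcode
*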
* \returns a reference to \c *this.
*/
Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1)
{
m_shiftOffset = offset;
m_shiftScale = scale;
return derived();
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal */
template<typename Stream>
void dumpMemory(Stream& s)
{
int total = 0;
s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n";
s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n";
s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n";
s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n";
s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n";
s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n";
s << " TOTAL: " << (total>> 20) << "Mb" << "\n";
}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
eigen_assert(m_matrix.rows()==b.rows());
if(m_info!=Success)
return;
if(m_P.size()>0)
dest = m_P * b;
else
dest = b;
if(m_matrix.nonZeros()>0) // otherwise L==I
derived().matrixL().solveInPlace(dest);
if(m_diag.size()>0)
dest = m_diag.asDiagonal().inverse() * dest;
if (m_matrix.nonZeros()>0) // otherwise U==I
derived().matrixU().solveInPlace(dest);
if(m_P.size()>0)
dest = m_Pinv * dest;
}
template<typename Rhs,typename Dest>
void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
{
internal::solve_sparse_through_dense_panels(derived(), b, dest);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
/** Computes the sparse Cholesky decomposition of \a matrix */
template<bool DoLDLT>
void compute(const MatrixType& matrix)
{
eigen_assert(matrix.rows()==matrix.cols());
Index size = matrix.cols();
CholMatrixType tmp(size,size);
ConstCholMatrixPtr pmat;
ordering(matrix, pmat, tmp);
analyzePattern_preordered(*pmat, DoLDLT);
factorize_preordered<DoLDLT>(*pmat);
}
template<bool DoLDLT>
void factorize(const MatrixType& a)
{
eigen_assert(a.rows()==a.cols());
Index size = a.cols();
CholMatrixType tmp(size,size);
ConstCholMatrixPtr pmat;
if(m_P.size()==0 && (UpLo&Upper)==Upper)
{
// If there is no ordering, try to directly use the input matrix without any copy
internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, tmp);
}
else
{
tmp.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
pmat = &tmp;
}
factorize_preordered<DoLDLT>(*pmat);
}
template<bool DoLDLT>
void factorize_preordered(const CholMatrixType& a);
void analyzePattern(const MatrixType& a, bool doLDLT)
{
eigen_assert(a.rows()==a.cols());
Index size = a.cols();
CholMatrixType tmp(size,size);
ConstCholMatrixPtr pmat;
ordering(a, pmat, tmp);
analyzePattern_preordered(*pmat,doLDLT);
}
void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT);
void ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap);
/** keeps off-diagonal entries; drops diagonal entries */
struct keep_diag {
inline bool operator() (const Index& row, const Index& col, const Scalar&) const
{
return row!=col;
}
};
mutable ComputationInfo m_info;
bool m_factorizationIsOk;
bool m_analysisIsOk;
CholMatrixType m_matrix;
VectorType m_diag; // the diagonal coefficients (LDLT mode)
VectorI m_parent; // elimination tree
VectorI m_nonZerosPerCol;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // the permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // the inverse permutation
RealScalar m_shiftOffset;
RealScalar m_shiftScale;
};
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLDLT;
template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialCholesky;
namespace internal {
template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::Lower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
};
template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
typedef TriangularView<const CholMatrixType, Eigen::UnitLower> MatrixL;
typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
};
template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
};
}
/** \ingroup SparseCholesky_Module
* \class SimplicialLLT
* \brief A direct sparse LLT Cholesky factorization
*
* This class provides an LL^T Cholesky factorization of sparse matrices that are
* selfadjoint and positive definite. The factorization allows for solving A.X = B where
* X and B can be either dense or sparse.
*
* In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
* such that the factorized matrix is P A P^-1.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
* \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
* \implsparsesolverconcept
*
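* A minimal sketch extracting the factor (hypothetical sparse SPD matrix \c A):
* \code
* SimplicialLLT<SparseMatrix<double> > llt(A);
* SparseMatrix<double> L = llt.matrixL(); // lower triangular factor of P*A*P^-1
* \endcode
*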
* \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
*/
template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
enum { UpLo = _UpLo };
typedef SimplicialCholeskyBase<SimplicialLLT> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialLLT> Traits;
typedef typename Traits::MatrixL MatrixL;
typedef typename Traits::MatrixU MatrixU;
public:
/** Default constructor */
SimplicialLLT() : Base() {}
/** Constructs and performs the LLT factorization of \a matrix */
explicit SimplicialLLT(const MatrixType& matrix)
: Base(matrix) {}
/** \returns an expression of the factor L */
inline const MatrixL matrixL() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
return Traits::getL(Base::m_matrix);
}
/** \returns an expression of the factor U (= L^*) */
inline const MatrixU matrixU() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
return Traits::getU(Base::m_matrix);
}
/** Computes the sparse Cholesky decomposition of \a matrix */
SimplicialLLT& compute(const MatrixType& matrix)
{
Base::template compute<false>(matrix);
return *this;
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
* \sa factorize()
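      *
      * A hedged sketch of the intended reuse pattern (A1 and A2 are assumed to share
      * the same sparsity pattern):
      * \code
      * SimplicialLLT<SparseMatrix<double> > solver;
      * solver.analyzePattern(A1);  // symbolic step, done once
      * solver.factorize(A1);       // numeric step for A1
      * x1 = solver.solve(b1);
      * solver.factorize(A2);       // reuses the symbolic analysis
      * x2 = solver.solve(b2);
      * \endcode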
*/
void analyzePattern(const MatrixType& a)
{
Base::analyzePattern(a, false);
}
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
void factorize(const MatrixType& a)
{
Base::template factorize<false>(a);
}
/** \returns the determinant of the underlying matrix from the current factorization */
Scalar determinant() const
{
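      // det(A) = det(L) * det(L^*) = |det(L)|^2, and det(L) is the product of its diagonal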
Scalar detL = Base::m_matrix.diagonal().prod();
return numext::abs2(detL);
}
};
/** \ingroup SparseCholesky_Module
* \class SimplicialLDLT
 * \brief A direct sparse LDLT Cholesky factorization without square root.
*
 * This class provides an LDL^T Cholesky factorization without square root of sparse matrices that are
* selfadjoint and positive definite. The factorization allows for solving A.X = B where
* X and B can be either dense or sparse.
*
* In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
* such that the factorized matrix is P A P^-1.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
* \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
* \implsparsesolverconcept
*
* \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
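  *
  * A minimal usage sketch (illustrative names only):
  * \code
  * SimplicialLDLT<SparseMatrix<double> > ldlt(A);
  * if(ldlt.info() == Success)
  * {
  *   VectorXd x = ldlt.solve(b);   // solves A.x = b without any square root
  *   VectorXd D = ldlt.vectorD();  // diagonal of D in P.A.P^-1 = L.D.L^*
  * }
  * \endcode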
*/
template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
enum { UpLo = _UpLo };
typedef SimplicialCholeskyBase<SimplicialLDLT> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialLDLT> Traits;
typedef typename Traits::MatrixL MatrixL;
typedef typename Traits::MatrixU MatrixU;
public:
/** Default constructor */
SimplicialLDLT() : Base() {}
    /** Constructs and performs the LDLT factorization of \a matrix */
explicit SimplicialLDLT(const MatrixType& matrix)
: Base(matrix) {}
/** \returns a vector expression of the diagonal D */
inline const VectorType vectorD() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
return Base::m_diag;
}
/** \returns an expression of the factor L */
inline const MatrixL matrixL() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
return Traits::getL(Base::m_matrix);
}
/** \returns an expression of the factor U (= L^*) */
inline const MatrixU matrixU() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
return Traits::getU(Base::m_matrix);
}
/** Computes the sparse Cholesky decomposition of \a matrix */
SimplicialLDLT& compute(const MatrixType& matrix)
{
Base::template compute<true>(matrix);
return *this;
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
* \sa factorize()
*/
void analyzePattern(const MatrixType& a)
{
Base::analyzePattern(a, true);
}
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
void factorize(const MatrixType& a)
{
Base::template factorize<true>(a);
}
/** \returns the determinant of the underlying matrix from the current factorization */
Scalar determinant() const
{
return Base::m_diag.prod();
}
};
/** \deprecated use SimplicialLDLT or class SimplicialLLT
* \ingroup SparseCholesky_Module
* \class SimplicialCholesky
*
* \sa class SimplicialLDLT, class SimplicialLLT
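  *
  * A hedged sketch of the runtime LLT/LDLT switch this class offers (names are illustrative):
  * \code
  * SimplicialCholesky<SparseMatrix<double> > chol;
  * chol.setMode(SimplicialCholeskyLLT);  // LDLT is the default
  * chol.compute(A);
  * x = chol.solve(b);
  * \endcode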
*/
template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
enum { UpLo = _UpLo };
typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef internal::traits<SimplicialCholesky> Traits;
typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;
typedef internal::traits<SimplicialLLT<MatrixType,UpLo> > LLTTraits;
public:
SimplicialCholesky() : Base(), m_LDLT(true) {}
explicit SimplicialCholesky(const MatrixType& matrix)
: Base(), m_LDLT(true)
{
compute(matrix);
}
SimplicialCholesky& setMode(SimplicialCholeskyMode mode)
{
switch(mode)
{
case SimplicialCholeskyLLT:
m_LDLT = false;
break;
case SimplicialCholeskyLDLT:
m_LDLT = true;
break;
default:
break;
}
return *this;
}
inline const VectorType vectorD() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
return Base::m_diag;
}
inline const CholMatrixType rawMatrix() const {
eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
return Base::m_matrix;
}
/** Computes the sparse Cholesky decomposition of \a matrix */
SimplicialCholesky& compute(const MatrixType& matrix)
{
if(m_LDLT)
Base::template compute<true>(matrix);
else
Base::template compute<false>(matrix);
return *this;
}
    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
* \sa factorize()
*/
void analyzePattern(const MatrixType& a)
{
Base::analyzePattern(a, m_LDLT);
}
/** Performs a numeric decomposition of \a matrix
*
      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
void factorize(const MatrixType& a)
{
if(m_LDLT)
Base::template factorize<true>(a);
else
Base::template factorize<false>(a);
}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
{
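      // Computes x = P^-1 * U^-1 * D^-1 * L^-1 * P * b; the D^-1 step only applies in
      // LDLT mode (m_diag is empty for LLT), and L==U==I when no entries are stored.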
eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
eigen_assert(Base::m_matrix.rows()==b.rows());
if(Base::m_info!=Success)
return;
if(Base::m_P.size()>0)
dest = Base::m_P * b;
else
dest = b;
if(Base::m_matrix.nonZeros()>0) // otherwise L==I
{
if(m_LDLT)
LDLTTraits::getL(Base::m_matrix).solveInPlace(dest);
else
LLTTraits::getL(Base::m_matrix).solveInPlace(dest);
}
if(Base::m_diag.size()>0)
dest = Base::m_diag.asDiagonal().inverse() * dest;
      if (Base::m_matrix.nonZeros()>0) // otherwise U==I
{
if(m_LDLT)
LDLTTraits::getU(Base::m_matrix).solveInPlace(dest);
else
LLTTraits::getU(Base::m_matrix).solveInPlace(dest);
}
if(Base::m_P.size()>0)
dest = Base::m_Pinv * dest;
}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
{
internal::solve_sparse_through_dense_panels(*this, b, dest);
}
Scalar determinant() const
{
if(m_LDLT)
{
return Base::m_diag.prod();
}
else
{
Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
return numext::abs2(detL);
}
}
protected:
bool m_LDLT;
};
template<typename Derived>
void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap)
{
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
pmat = ≈
// Note that ordering methods compute the inverse permutation
if(!internal::is_same<OrderingType,NaturalOrdering<Index> >::value)
{
{
CholMatrixType C;
C = a.template selfadjointView<UpLo>();
OrderingType ordering;
ordering(C,m_Pinv);
}
if(m_Pinv.size()>0) m_P = m_Pinv.inverse();
else m_P.resize(0);
ap.resize(size,size);
ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
}
else
{
m_Pinv.resize(0);
m_P.resize(0);
if(int(UpLo)==int(Lower) || MatrixType::IsRowMajor)
{
      // we have to transpose the lower part to the upper one
ap.resize(size,size);
ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>();
}
else
internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, ap);
}
}
} // end namespace Eigen
#endif // EIGEN_SIMPLICIAL_CHOLESKY_H
| 24,010 | 33.798551 | 167 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <[email protected]>
/*
 NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
LDL License:
Your use or distribution of LDL or any modified version of
LDL implies that you agree to this License.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
Permission is hereby granted to use or copy this program under the
terms of the GNU LGPL, provided that the Copyright, this License,
and the Availability of the original version is retained on all copies.
User documentation of any code that uses this code or any modified
version of this code must cite the Copyright, this License, the
Availability note, and "Used by permission." Permission to modify
the code and to distribute modified code is granted, provided the
Copyright, this License, and the Availability note are retained,
and a notice that the code was modified is included.
*/
#include "../Core/util/NonMPL2.h"
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
namespace Eigen {
template<typename Derived>
void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
{
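  // Symbolic phase, following Davis' LDL: build the elimination tree of the permuted
  // matrix and derive from it the nonzero count of each column of L.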
const StorageIndex size = StorageIndex(ap.rows());
m_matrix.resize(size, size);
m_parent.resize(size);
m_nonZerosPerCol.resize(size);
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
for(StorageIndex k = 0; k < size; ++k)
{
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
m_parent[k] = -1; /* parent of k is not yet known */
tags[k] = k; /* mark node k as visited */
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{
StorageIndex i = it.index();
if(i < k)
{
/* follow path from i to root of etree, stop at flagged node */
for(; tags[i] != k; i = m_parent[i])
{
/* find parent of i if not yet determined */
if (m_parent[i] == -1)
m_parent[i] = k;
m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
tags[i] = k; /* mark i as visited */
}
}
}
}
/* construct Lp index array from m_nonZerosPerCol column counts */
StorageIndex* Lp = m_matrix.outerIndexPtr();
Lp[0] = 0;
for(StorageIndex k = 0; k < size; ++k)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
m_matrix.resizeNonZeros(Lp[size]);
m_isInitialized = true;
m_info = Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
}
template<typename Derived>
template<bool DoLDLT>
void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
{
using std::sqrt;
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
eigen_assert(ap.rows()==ap.cols());
eigen_assert(m_parent.size()==ap.rows());
eigen_assert(m_nonZerosPerCol.size()==ap.rows());
const StorageIndex size = StorageIndex(ap.rows());
const StorageIndex* Lp = m_matrix.outerIndexPtr();
StorageIndex* Li = m_matrix.innerIndexPtr();
Scalar* Lx = m_matrix.valuePtr();
ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
bool ok = true;
m_diag.resize(DoLDLT ? size : 0);
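  // Numeric phase (up-looking): for each row k, gather the pattern of L(k,:) by walking
  // the elimination tree, then perform a sparse triangular solve to obtain its values.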
for(StorageIndex k = 0; k < size; ++k)
{
// compute nonzero pattern of kth row of L, in topological order
y[k] = 0.0; // Y(0:k) is now all zero
StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{
StorageIndex i = it.index();
if(i <= k)
{
y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
Index len;
for(len = 0; tags[i] != k; i = m_parent[i])
{
pattern[len++] = i; /* L(k,i) is nonzero */
tags[i] = k; /* mark i as visited */
}
while(len > 0)
pattern[--top] = pattern[--len];
}
}
/* compute numerical values kth row of L (a sparse triangular solve) */
RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
y[k] = 0.0;
for(; top < size; ++top)
{
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
y[i] = 0.0;
/* the nonzero entry L(k,i) */
Scalar l_ki;
if(DoLDLT)
l_ki = yi / m_diag[i];
else
yi = l_ki = yi / Lx[Lp[i]];
Index p2 = Lp[i] + m_nonZerosPerCol[i];
Index p;
for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
y[Li[p]] -= numext::conj(Lx[p]) * yi;
d -= numext::real(l_ki * numext::conj(yi));
Li[p] = k; /* store L(k,i) in column form of L */
Lx[p] = l_ki;
++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
}
if(DoLDLT)
{
m_diag[k] = d;
if(d == RealScalar(0))
{
ok = false; /* failure, D(k,k) is zero */
break;
}
}
else
{
Index p = Lp[k] + m_nonZerosPerCol[k]++;
      Li[p] = k;              /* store L(k,k) = sqrt(d) in column k */
if(d <= RealScalar(0)) {
ok = false; /* failure, matrix is not positive definite */
break;
}
      Lx[p] = sqrt(d);
}
}
m_info = ok ? Success : NumericalIssue;
m_factorizationIsOk = true;
}
} // end namespace Eigen
#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
| 6,898 | 33.495 | 128 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/AmbiVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H
namespace Eigen {
namespace internal {
/** \internal
* Hybrid sparse/dense vector class designed for intensive read-write operations.
*
* See BasicSparseLLT and SparseProduct for usage examples.
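  *
  * A minimal sketch of the intended read-write protocol (n, i, x and process() are
  * hypothetical):
  * \code
  * AmbiVector<double,int> vec(n);
  * vec.init(0.01);        // density estimates at or below 0.1 select the sparse mode
  * vec.setZero();
  * vec.coeffRef(i) += x;  // random writes; call restart() before writing a smaller index
  * for(AmbiVector<double,int>::Iterator it(vec); it; ++it)
  *   process(it.index(), it.value());
  * \endcode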
*/
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;
explicit AmbiVector(Index size)
: m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
}
void init(double estimatedDensity);
void init(int mode);
Index nonZeros() const;
/** Specifies a sub-vector to work on */
void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }
void setZero();
void restart();
Scalar& coeffRef(Index i);
Scalar& coeff(Index i);
class Iterator;
~AmbiVector() { delete[] m_buffer; }
void resize(Index size)
{
if (m_allocatedSize < size)
reallocate(size);
m_size = convert_index(size);
}
StorageIndex size() const { return m_size; }
protected:
StorageIndex convert_index(Index idx)
{
return internal::convert_index<StorageIndex>(idx);
}
void reallocate(Index size)
{
// if the size of the matrix is not too large, let's allocate a bit more than needed such
    // that we can handle a dense vector even in sparse mode.
delete[] m_buffer;
if (size<1000)
{
Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[allocSize];
}
else
{
m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[size];
}
m_size = convert_index(size);
m_start = 0;
m_end = m_size;
}
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
delete[] m_buffer;
m_buffer = newBuffer;
}
protected:
// element type of the linked list
struct ListEl
{
StorageIndex next;
StorageIndex index;
Scalar value;
};
    // used to store data in both modes
Scalar* m_buffer;
Scalar m_zero;
StorageIndex m_size;
StorageIndex m_start;
StorageIndex m_end;
StorageIndex m_allocatedSize;
StorageIndex m_allocatedElements;
StorageIndex m_mode;
// linked list mode
StorageIndex m_llStart;
StorageIndex m_llCurrent;
StorageIndex m_llSize;
};
/** \returns the number of non zeros in the current sub vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
else
return m_end - m_start;
}
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
if (estimatedDensity>0.1)
init(IsDense);
else
init(IsSparse);
}
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
if (m_mode==IsSparse)
{
m_llSize = 0;
m_llStart = -1;
}
}
/** Must be called whenever we might perform a write access
* with an index smaller than the previous one.
*
* Don't worry, this function is extremely cheap.
*/
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
m_llCurrent = m_llStart;
}
/** Set all coefficients of current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
if (m_mode==IsDense)
{
for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0);
}
else
{
eigen_assert(m_mode==IsSparse);
m_llSize = 0;
m_llStart = -1;
}
}
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
// TODO factorize the following code to reduce code generation
eigen_assert(m_mode==IsSparse);
if (m_llSize==0)
{
// this is the first element
m_llStart = 0;
m_llCurrent = 0;
++m_llSize;
llElements[0].value = Scalar(0);
llElements[0].index = convert_index(i);
llElements[0].next = -1;
return llElements[0].value;
}
else if (i<llElements[m_llStart].index)
{
// this is going to be the new first element of the list
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = convert_index(i);
el.next = m_llStart;
m_llStart = m_llSize;
++m_llSize;
m_llCurrent = m_llStart;
return el.value;
}
else
{
StorageIndex nextel = llElements[m_llCurrent].next;
eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
m_llCurrent = nextel;
nextel = llElements[nextel].next;
}
if (llElements[m_llCurrent].index==i)
{
          // the coefficient already exists and we found it!
return llElements[m_llCurrent].value;
}
else
{
if (m_llSize>=m_allocatedElements)
{
reallocateSparse();
llElements = reinterpret_cast<ListEl*>(m_buffer);
}
eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
// let's insert a new coefficient
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = convert_index(i);
el.next = llElements[m_llCurrent].next;
llElements[m_llCurrent].next = m_llSize;
++m_llSize;
return el.value;
}
}
}
}
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
eigen_assert(m_mode==IsSparse);
if ((m_llSize==0) || (i<llElements[m_llStart].index))
{
return m_zero;
}
else
{
Index elid = m_llStart;
while (elid >= 0 && llElements[elid].index<i)
elid = llElements[elid].next;
      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
else
return m_zero;
}
}
}
/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
public:
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
/** Default constructor
* \param vec the vector on which we iterate
* \param epsilon the minimal value used to prune zero coefficients.
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
: m_vector(vec)
{
using std::abs;
m_epsilon = epsilon;
m_isDense = m_vector.m_mode==IsDense;
if (m_isDense)
{
m_currentEl = 0; // this is to avoid a compilation warning
m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = m_vector.m_start-1;
++(*this);
}
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
m_currentEl = m_vector.m_llStart;
while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
m_currentEl = llElements[m_currentEl].next;
if (m_currentEl<0)
{
m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = -1;
}
else
{
m_cachedIndex = llElements[m_currentEl].index;
m_cachedValue = llElements[m_currentEl].value;
}
}
}
StorageIndex index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; }
Iterator& operator++()
{
using std::abs;
if (m_isDense)
{
do {
++m_cachedIndex;
} while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
if (m_cachedIndex<m_vector.m_end)
m_cachedValue = m_vector.m_buffer[m_cachedIndex];
else
m_cachedIndex=-1;
}
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
do {
m_currentEl = llElements[m_currentEl].next;
} while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
if (m_currentEl<0)
{
m_cachedIndex = -1;
}
else
{
m_cachedIndex = llElements[m_currentEl].index;
m_cachedValue = llElements[m_currentEl].value;
}
}
return *this;
}
protected:
const AmbiVector& m_vector; // the target vector
StorageIndex m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
StorageIndex m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_AMBIVECTOR_H
| 10,532 | 26.865079 | 136 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/CompressedStorage.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H
namespace Eigen {
namespace internal {
/** \internal
* Stores a sparse set of values as a list of values and a list of indices.
*
*/
template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
protected:
typedef typename NumTraits<Scalar>::Real RealScalar;
public:
CompressedStorage()
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{}
explicit CompressedStorage(Index size)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
resize(size);
}
CompressedStorage(const CompressedStorage& other)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
*this = other;
}
CompressedStorage& operator=(const CompressedStorage& other)
{
resize(other.size());
if(other.size()>0)
{
internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
}
return *this;
}
void swap(CompressedStorage& other)
{
std::swap(m_values, other.m_values);
std::swap(m_indices, other.m_indices);
std::swap(m_size, other.m_size);
std::swap(m_allocatedSize, other.m_allocatedSize);
}
~CompressedStorage()
{
delete[] m_values;
delete[] m_indices;
}
void reserve(Index size)
{
Index newAllocatedSize = m_size + size;
if (newAllocatedSize > m_allocatedSize)
reallocate(newAllocatedSize);
}
void squeeze()
{
if (m_allocatedSize>m_size)
reallocate(m_size);
}
void resize(Index size, double reserveSizeFactor = 0)
{
if (m_allocatedSize<size)
{
Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
if(realloc_size<size)
internal::throw_std_bad_alloc();
reallocate(realloc_size);
}
m_size = size;
}
void append(const Scalar& v, Index i)
{
Index id = m_size;
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = internal::convert_index<StorageIndex>(i);
}
inline Index size() const { return m_size; }
inline Index allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
const Scalar* valuePtr() const { return m_values; }
Scalar* valuePtr() { return m_values; }
const StorageIndex* indexPtr() const { return m_indices; }
StorageIndex* indexPtr() { return m_indices; }
inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index start, Index end, Index key) const
{
while(end>start)
{
Index mid = (end+start)>>1;
if (m_indices[mid]<key)
start = mid+1;
else
end = mid;
}
return start;
}
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
else if (key==m_indices[m_size-1])
return m_values[m_size-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(0,m_size-1,key);
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** Like at(), but the search is performed in the range [start,end) */
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
{
if (start>=end)
return defaultValue;
else if (end>start && key==m_indices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(start,end-1,key);
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
{
Index id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
{
if (m_allocatedSize<m_size+1)
{
m_allocatedSize = 2*(m_size+1);
internal::scoped_array<Scalar> newValues(m_allocatedSize);
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
// copy first chunk
internal::smart_copy(m_values, m_values +id, newValues.ptr());
internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
// copy the rest
if(m_size>id)
{
internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
}
else if(m_size>id)
{
internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
}
m_size++;
m_indices[id] = internal::convert_index<StorageIndex>(key);
m_values[id] = defaultValue;
}
return m_values[id];
}
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
Index n = size();
for (Index i=0; i<n; ++i)
{
if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
{
value(k) = value(i);
index(k) = index(i);
++k;
}
}
resize(k,0);
}
protected:
inline void reallocate(Index size)
{
#ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
#endif
eigen_internal_assert(size!=m_allocatedSize);
internal::scoped_array<Scalar> newValues(size);
internal::scoped_array<StorageIndex> newIndices(size);
Index copySize = (std::min)(size, m_size);
if (copySize>0) {
internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
m_allocatedSize = size;
}
protected:
Scalar* m_values;
StorageIndex* m_indices;
Index m_size;
Index m_allocatedSize;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_COMPRESSED_STORAGE_H
| 8,164 | 30.525097 | 130 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
std::memset(mask,0,sizeof(bool)*rows);
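  // Gustavson-style scatter: "values" holds the dense accumulator for the current
  // result column, "mask" flags the populated rows, and "indices" records them in
  // visit order.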
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
// the product of a rhs column with the lhs is X+Y where X is the average number of non zero
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.setZero();
res.reserve(Index(estimated_nnz_prod));
// we compute each column of the result, one after the other
for (Index j=0; j<cols; ++j)
{
res.startVec(j);
Index nnz = 0;
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
Scalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
Scalar x = lhsIt.value();
if(!mask[i])
{
mask[i] = true;
values[i] = x * y;
indices[nnz] = i;
++nnz;
}
else
values[i] += x * y;
}
}
if(!sortedInsertion)
{
// unordered insertion
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
}
else
{
// alternative ordered insertion code:
const Index t200 = rows/11; // 11 == (log2(200)*1.39)
const Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement faster sorting algorithms for very small nnz
// if the result is sparse enough => use a quick sort
// otherwise => loop through the entire vector
// In order to avoid to perform an expensive log2 when the
// result is clearly very sparse we use a linear bound up to 200.
if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
{
if(nnz>1) std::sort(indices,indices+nnz);
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
}
else
{
// dense path
for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
mask[i] = false;
res.insertBackByOuterInner(j,i) = values[i];
}
}
}
}
}
res.finalize();
}
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
typedef typename remove_all<Lhs>::type LhsCleaned;
typedef typename LhsCleaned::Scalar Scalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
if(lhs.rows()>rhs.cols())
{
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
// perform sorted insertion
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
res = resCol.markAsRValue();
}
else
{
ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
      // resort to a transpose to sort the entries
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
RowMajorMatrix resRow(resCol);
res = resRow.markAsRValue();
}
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
ColMajorMatrix resCol(resRow);
res = resCol;
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
for (Index j=0; j<cols; ++j)
{
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
Scalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
Scalar x = lhsIt.value();
res.coeffRef(i,j) += x * y;
}
}
}
}
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct sparse_sparse_to_dense_product_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix lhsCol(lhs);
internal::sparse_sparse_to_dense_product_impl<ColMajorMatrix,Rhs,ResultType>(lhsCol, rhs, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix rhsCol(rhs);
internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorMatrix,ResultType>(lhs, rhsCol, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
Transpose<ResultType> trRes(res);
internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
| 12,655 | 35.578035 | 158 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/MappedSparseMatrix.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
#define EIGEN_MAPPED_SPARSEMATRIX_H
namespace Eigen {
/** \deprecated Use Map<SparseMatrix<> >
* \class MappedSparseMatrix
*
* \brief Sparse matrix
*
* \param _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
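  * A hedged migration sketch to the non-deprecated Map equivalent (the raw arrays are
  * assumed to describe valid compressed storage, as in the constructor below):
  * \code
  * Map<SparseMatrix<double> > sm(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr);
  * \endcode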
*
*/
namespace internal {
template<typename _Scalar, int _Flags, typename _StorageIndex>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
} // end namespace internal
template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
: public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;
public:
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::Scalar Scalar;
inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
{}
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
| 2,191 | 31.235294 | 175 |
h
|
abess
|
abess-master/python/include/Eigen/src/SparseCore/SparseAssign.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
internal::call_assignment_no_alias(derived(), other.derived());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
// TODO use the evaluator mechanism
other.evalTo(derived());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
// by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine
internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
{
internal::call_assignment_no_alias(derived(), other.derived());
return derived();
}
namespace internal {
template<>
struct storage_kind_to_evaluator_kind<Sparse> {
typedef IteratorBased Kind;
};
template<>
struct storage_kind_to_shape<Sparse> {
typedef SparseShape Shape;
};
struct Sparse2Sparse {};
struct Sparse2Dense {};
template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; };
template<> struct AssignmentKind<DenseShape, SparseTriangularShape> { typedef Sparse2Dense Kind; };
template<typename DstXprType, typename SrcXprType>
void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
typedef typename DstXprType::Scalar Scalar;
typedef internal::evaluator<DstXprType> DstEvaluatorType;
typedef internal::evaluator<SrcXprType> SrcEvaluatorType;
SrcEvaluatorType srcEvaluator(src);
const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
if ((!transpose) && src.isRValue())
{
// eval without temporary
dst.resize(src.rows(), src.cols());
dst.setZero();
dst.reserve((std::max)(src.rows(),src.cols())*2);
for (Index j=0; j<outerEvaluationSize; ++j)
{
dst.startVec(j);
for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
{
Scalar v = it.value();
dst.insertBackByOuterInner(j,it.index()) = v;
}
}
dst.finalize();
}
else
{
// eval through a temporary
eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
(!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
"the transpose operation is supposed to be handled in SparseMatrix::operator=");
enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
DstXprType temp(src.rows(), src.cols());
temp.reserve((std::max)(src.rows(),src.cols())*2);
for (Index j=0; j<outerEvaluationSize; ++j)
{
temp.startVec(j);
for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
{
Scalar v = it.value();
temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
}
}
temp.finalize();
dst = temp.markAsRValue();
}
}
// Generic Sparse to Sparse assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
assign_sparse_to_sparse(dst.derived(), src.derived());
}
};
// Generic Sparse to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
{
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
dst.setZero();
internal::evaluator<SrcXprType> srcEval(src);
resize_if_allowed(dst, src, func);
internal::evaluator<DstXprType> dstEval(dst);
const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
for (Index j=0; j<outerEvaluationSize; ++j)
for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
}
};
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
typedef Solve<DecType,RhsType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
src.dec()._solve_impl(src.rhs(), dst);
}
};
struct Diagonal2Sparse {};
template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef typename DstXprType::Scalar Scalar;
typedef Array<StorageIndex,Dynamic,1> ArrayXI;
typedef Array<Scalar,Dynamic,1> ArrayXS;
template<int Options>
static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
Index size = src.diagonal().size();
dst.makeCompressed();
dst.resizeNonZeros(size);
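    // Build a pure diagonal pattern: one stored entry per column, inner indices
    // 0..size-1, outer starts 0..size, and values copied from the source diagonal.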
Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
}
template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
dst.diagonal() = src.diagonal();
}
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEASSIGN_H
| 8,080 | 36.239631 | 182 |
h
|